#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
- # Generated by GNU Autoconf 2.69 for PostgreSQL 9.5alpha1 (Postgres-XL 9.5alpha1).
-# Generated by GNU Autoconf 2.69 for PostgreSQL 9.6beta4.
++# Generated by GNU Autoconf 2.69 for PostgreSQL 9.6beta4 (Postgres-XL 9.6alpha1).
#
-# Report bugs to <pgsql-bugs@postgresql.org>.
+# Report bugs to <bugs@postgres-xl.org>.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
# Identity of this package.
PACKAGE_NAME='PostgreSQL'
PACKAGE_TARNAME='postgresql'
- PACKAGE_VERSION='9.5alpha1 (Postgres-XL 9.5alpha1)'
- PACKAGE_XC_VERSION='9.5alpha1'
- PACKAGE_STRING='PostgreSQL 9.5alpha1 (Postgres-XL 9.5alpha1)'
-PACKAGE_VERSION='9.6beta4'
-PACKAGE_STRING='PostgreSQL 9.6beta4'
++PACKAGE_VERSION='9.6beta4 (Postgres-XL 9.6alpha1)'
++PACKAGE_XC_VERSION='9.6alpha1'
++PACKAGE_STRING='PostgreSQL 9.6beta4 (Postgres-XL 9.6alpha1)'
PACKAGE_URL=''
ac_unique_file="src/backend/access/common/heaptuple.c"
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
- \`configure' configures PostgreSQL 9.5alpha1 (Postgres-XL 9.5alpha1) to adapt to many kinds of systems.
-\`configure' configures PostgreSQL 9.6beta4 to adapt to many kinds of systems.
++\`configure' configures PostgreSQL 9.6beta4 (Postgres-XL 9.6alpha1) to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of PostgreSQL 9.5alpha1 (Postgres-XL 9.5alpha1):";;
- short | recursive ) echo "Configuration of PostgreSQL 9.6beta4:";;
++ short | recursive ) echo "Configuration of PostgreSQL 9.6beta4 (Postgres-XL 9.6alpha1):";;
esac
cat <<\_ACEOF
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
- PostgreSQL configure 9.5alpha1 (Postgres-XL 9.5alpha1)
-PostgreSQL configure 9.6beta4
++PostgreSQL configure 9.6beta4 (Postgres-XL 9.6alpha1)
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
- It was created by PostgreSQL $as_me 9.5alpha1 (Postgres-XL 9.5alpha1), which was
-It was created by PostgreSQL $as_me 9.6beta4, which was
++It was created by PostgreSQL $as_me 9.6beta4 (Postgres-XL 9.6alpha1), which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
_ACEOF
+# For PGXC, define -DPGXC by default. The user can override this by adding -UPGXC.
+# For Postgres-XL, set both -DPGXC and -DXCP
+CFLAGS="-DPGXC -DXCP $CFLAGS"
+
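The flags added above are consumed throughout the tree via conditional compilation. A minimal, self-contained sketch of the pattern (placeholder strings, not code from the tree); build it with, e.g., cc -DPGXC -DXCP demo.c:

#include <stdio.h>

int main(void)
{
#ifdef PGXC
	puts("PGXC path compiled in");        /* code shared with Postgres-XC */
#endif
#ifdef XCP
	puts("XCP path compiled in");         /* Postgres-XL-only code */
#endif
#if !defined(PGXC) && !defined(XCP)
	puts("vanilla PostgreSQL build");
#endif
	return 0;
}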
# Begin output steps
{ $as_echo "$as_me:${as_lineno-$LINENO}: using compiler=$cc_string" >&5
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
- This file was extended by PostgreSQL $as_me 9.5alpha1 (Postgres-XL 9.5alpha1), which was
-This file was extended by PostgreSQL $as_me 9.6beta4, which was
++This file was extended by PostgreSQL $as_me 9.6beta4 (Postgres-XL 9.6alpha1), which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
- PostgreSQL config.status 9.5alpha1 (Postgres-XL 9.5alpha1)
-PostgreSQL config.status 9.6beta4
++PostgreSQL config.status 9.6beta4 (Postgres-XL 9.6alpha1)
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
dnl
m4_pattern_forbid(^PGAC_)dnl to catch undefined macros
m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required.
Untested combinations of 'autoconf' and PostgreSQL versions are not
pgcrypto \
pgrowlocks \
pgstattuple \
+ pgxc_clean \
+ pgxc_ctl \
+ pgxc_monitor \
+ pg_visibility \
postgres_fdw \
seg \
spi \
MODULES = citext
EXTENSION = citext
- DATA = citext--1.1.sql citext--1.0--1.1.sql citext--unpackaged--1.0.sql
+ DATA = citext--1.3.sql citext--1.2--1.3.sql citext--1.1--1.2.sql \
+ citext--1.0--1.1.sql citext--unpackaged--1.0.sql
PGFILEDESC = "citext - case-insensitive character string data type"
-REGRESS = citext
+REGRESS = citext xl_citext
ifdef USE_PGXS
PG_CONFIG = pg_config
(3 rows)
-- SIMILAR TO should be case-insensitive.
-SELECT name FROM srt WHERE name SIMILAR TO '%a.*';
+SELECT name FROM srt WHERE name SIMILAR TO '%a.*' ORDER BY name;
name
------
- AAA
- aba
- (2 rows)
+ ABA
+ (1 row)
-SELECT name FROM srt WHERE name SIMILAR TO '%A.*';
+SELECT name FROM srt WHERE name SIMILAR TO '%A.*' ORDER BY name;
name
------
- AAA
- aba
- (2 rows)
+ ABA
+ (1 row)
-- Explicit casts.
SELECT true::citext = 'true' AS t;
PG_CPPFLAGS = -DLOWER_NODE
EXTENSION = ltree
- DATA = ltree--1.0.sql ltree--unpackaged--1.0.sql
+ DATA = ltree--1.1.sql ltree--1.0--1.1.sql ltree--unpackaged--1.0.sql
PGFILEDESC = "ltree - hierarchical label data type"
-REGRESS = ltree
+REGRESS = ltree xl_ltree
ifdef USE_PGXS
PG_CONFIG = pg_config
Operating System (example: Linux 2.4.18) :
- PostgreSQL version (example: PostgreSQL 9.5alpha1): Postgres-XL 9.5alpha1
- PostgreSQL version (example: PostgreSQL 9.6beta4): PostgreSQL 9.6beta4
++ PostgreSQL version (example: PostgreSQL 9.6beta4): Postgres-XL 9.6alpha1
Compiler used (example: gcc 3.3.5) :
&pgstatstatements;
&pgstattuple;
&pgtrgm;
+ &pgxcclean;
+ &pgxcctl;
+ &pgxcddl;
+ &pgxcmonitor;
+ &pgvisibility;
&postgres-fdw;
&seg;
&sepgsql;
usually best to follow it.
</para>
+ <para>
+ As mentioned in the discussion of the <type>UNIQUE</> constraint, the
+ distribution column must be included in the <type>PRIMARY KEY</type>.
+ Other restrictions apply to the <type>PRIMARY KEY</> as well: when an
+ expression is used in a <type>PRIMARY KEY</> constraint, that expression
+ must reference the distribution column of its parent table and may not
+ reference any other column.
+ </para>
+ <para>
+ Primary keys are useful both for
+ documentation purposes and for client applications. For example,
+ a GUI application that allows modifying row values probably needs
+ to know the primary key of a table to be able to identify rows
+ uniquely. There are also various ways in which the database system
+ makes use of a primary key if one has been declared; for example,
+ the primary key defines the default target column(s) for foreign keys
+ referencing its table.
+ </para>
</sect2>
<sect2 id="ddl-constraints-fk">
<!ENTITY pgstatstatements SYSTEM "pgstatstatements.sgml">
<!ENTITY pgstattuple SYSTEM "pgstattuple.sgml">
<!ENTITY pgtrgm SYSTEM "pgtrgm.sgml">
+ <!ENTITY pgvisibility SYSTEM "pgvisibility.sgml">
<!ENTITY postgres-fdw SYSTEM "postgres-fdw.sgml">
+<!ENTITY pgxcclean SYSTEM "pgxcclean.sgml">
+<!ENTITY pgxcctl SYSTEM "pgxc_ctl-ref.sgml">
+<!ENTITY pgxcddl SYSTEM "pgxcddl.sgml">
+<!ENTITY pgxcmonitor SYSTEM "pgxcmonitor.sgml">
<!ENTITY seg SYSTEM "seg.sgml">
<!ENTITY contrib-spi SYSTEM "contrib-spi.sgml">
<!ENTITY sepgsql SYSTEM "sepgsql.sgml">
<!ENTITY sourcerepo SYSTEM "sourcerepo.sgml">
<!ENTITY release SYSTEM "release.sgml">
+ <!ENTITY release-9.6 SYSTEM "release-9.6.sgml">
+<!ENTITY release-xl-9.5r1 SYSTEM "release-xl-9.5r1.sgml">
<!ENTITY release-9.5 SYSTEM "release-9.5.sgml">
<!ENTITY release-9.4 SYSTEM "release-9.4.sgml">
<!ENTITY release-9.3 SYSTEM "release-9.3.sgml">
<!-- doc/src/sgml/legal.sgml -->
- <date>2015</date>
+ <date>2016</date>
<copyright>
- <year>1996-2015</year>
+ <year>1996-2016</year>
<holder>The PostgreSQL Global Development Group</holder>
</copyright>
+<copyright>
+ <year>2014-2016</year>
+ <holder>Postgres-XL Development Group</holder>
+</copyright>
+<copyright>
+ <year>2009-2012</year>
+ <holder>Postgres-XC Development Group</holder>
+</copyright>
+<copyright>
+ <year>2012-2014</year>
+ <holder>TransLattice, Inc.</holder>
+</copyright>
+<copyright>
+ <year>2015-2016</year>
+ <holder>2ndQuadrant Ltd</holder>
+</copyright>
<legalnotice id="legalnotice">
<title>Legal Notice</title>
<!ENTITY commit SYSTEM "commit.sgml">
<!ENTITY commitPrepared SYSTEM "commit_prepared.sgml">
<!ENTITY copyTable SYSTEM "copy.sgml">
+ <!ENTITY createAccessMethod SYSTEM "create_access_method.sgml">
<!ENTITY createAggregate SYSTEM "create_aggregate.sgml">
+<!ENTITY createBarrier SYSTEM "create_barrier.sgml">
<!ENTITY createCast SYSTEM "create_cast.sgml">
<!ENTITY createCollation SYSTEM "create_collation.sgml">
<!ENTITY createConversion SYSTEM "create_conversion.sgml">
[ , SSPACE = <replaceable class="PARAMETER">state_data_size</replaceable> ]
[ , FINALFUNC = <replaceable class="PARAMETER">ffunc</replaceable> ]
[ , FINALFUNC_EXTRA ]
+ [ , COMBINEFUNC = <replaceable class="PARAMETER">combinefunc</replaceable> ]
+ [ , SERIALFUNC = <replaceable class="PARAMETER">serialfunc</replaceable> ]
+ [ , DESERIALFUNC = <replaceable class="PARAMETER">deserialfunc</replaceable> ]
[ , INITCOND = <replaceable class="PARAMETER">initial_condition</replaceable> ]
+ [ , INITCOLLECT = <replaceable class="PARAMETER">initial_collection_condition</replaceable> ]
[ , MSFUNC = <replaceable class="PARAMETER">msfunc</replaceable> ]
[ , MINVFUNC = <replaceable class="PARAMETER">minvfunc</replaceable> ]
[ , MSTYPE = <replaceable class="PARAMETER">mstate_data_type</replaceable> ]
[ , FINALFUNC = <replaceable class="PARAMETER">ffunc</replaceable> ]
[ , FINALFUNC_EXTRA ]
[ , INITCOND = <replaceable class="PARAMETER">initial_condition</replaceable> ]
+ [ , INITCOLLECT = <replaceable class="PARAMETER">initial_collection_condition</replaceable> ]
+ [ , PARALLEL = { SAFE | RESTRICTED | UNSAFE } ]
[ , HYPOTHETICAL ]
)
[ , SSPACE = <replaceable class="PARAMETER">state_data_size</replaceable> ]
[ , FINALFUNC = <replaceable class="PARAMETER">ffunc</replaceable> ]
[ , FINALFUNC_EXTRA ]
+ [ , COMBINEFUNC = <replaceable class="PARAMETER">combinefunc</replaceable> ]
+ [ , SERIALFUNC = <replaceable class="PARAMETER">serialfunc</replaceable> ]
+ [ , DESERIALFUNC = <replaceable class="PARAMETER">deserialfunc</replaceable> ]
[ , INITCOND = <replaceable class="PARAMETER">initial_condition</replaceable> ]
+ [ , INITCOLLECT = <replaceable class="PARAMETER">initial_collection_condition</replaceable> ]
[ , MSFUNC = <replaceable class="PARAMETER">msfunc</replaceable> ]
[ , MINVFUNC = <replaceable class="PARAMETER">minvfunc</replaceable> ]
[ , MSTYPE = <replaceable class="PARAMETER">mstate_data_type</replaceable> ]
</para>
<para>
- The primary key constraint should name a set of columns that is
- different from other sets of columns named by any unique
- constraint defined for the same table.
+ <literal>PRIMARY KEY</literal> enforces the same data constraints as
+ a combination of <literal>UNIQUE</> and <literal>NOT NULL</>, but
+ identifying a set of columns as the primary key also provides metadata
+ about the design of the schema, since a primary key implies that other
+ tables can rely on this set of columns as a unique identifier for rows.
</para>
+
+ <para>
+ In <productname>Postgres-XL</>, if <command>DISTRIBUTE BY REPLICATION</> is not specified, the
+ distribution key must be included in the set of primary key
+ columns.
+ </para>
</listitem>
</varlistentry>
<command>pg_resetxlog</command> to run. But before you do
so, make doubly certain that there is no server process still alive.
</para>
+
+ <para>
+ In <productname>Postgres-XL</>, <command>pg_resetxlog</command>
+ acts only on the local node. You must run it manually on each
+ Coordinator and Datanode.
+ </para>
</refsect1>
+ <refsect1>
+ <title>See Also</title>
+
+ <simplelist type="inline">
+ <member><xref linkend="app-pgcontroldata"></member>
+ </simplelist>
+ </refsect1>
</refentry>
&commit;
&commitPrepared;
&copyTable;
+ &createAccessMethod;
&createAggregate;
+ &createBarrier;
&createCast;
&createCollation;
&createConversion;
The reason for splitting the release notes this way is so that appropriate
subsets can easily be copied into back branches.
-->
+ &release-9.6;
+&release-xl-9.5r1;
&release-9.5;
&release-9.4;
&release-9.3;
backend/utils/mb/conversion_procs \
backend/snowball \
include \
- interfaces \
backend/replication/libpqwalreceiver \
+ fe_utils \
bin \
pl \
makefiles \
# PostgreSQL version number
VERSION = @PACKAGE_VERSION@
+XLVERSION = @PACKAGE_XC_VERSION@
MAJORVERSION = @PG_MAJORVERSION@
+ VERSION_NUM = @PG_VERSION_NUM@
- # Support for VPATH builds
- # (PGXS VPATH support is handled separately in pgxs.mk)
- ifndef PGXS
+ # Set top_srcdir, srcdir, and VPATH.
+ ifdef PGXS
+ top_srcdir = $(top_builddir)
+
+ # If VPATH is set, or the Makefile is not in the current directory, we are
+ # building the extension with VPATH, so we set the variable here.
+ ifdef VPATH
+ srcdir = $(VPATH)
+ else
+ ifeq ($(CURDIR),$(dir $(firstword $(MAKEFILE_LIST))))
+ srcdir = .
+ VPATH =
+ else
+ srcdir = $(dir $(firstword $(MAKEFILE_LIST)))
+ VPATH = $(srcdir)
+ endif
+ endif
+ else # not PGXS
vpath_build = @vpath_build@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
* and we'd like to still refer to them via C struct offsets.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* clients and standalone backends are supported here).
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* reinitialize scan descriptor
*/
initscan(scan, key, true);
+
+ /*
+ * reset parallel scan, if present
+ */
+ if (scan->rs_parallel != NULL)
+ {
+ ParallelHeapScanDesc parallel_scan;
+
+ /*
+ * Caller is responsible for making sure that all workers have
+ * finished the scan before calling this, so it really shouldn't be
+ * necessary to acquire the mutex at all. We acquire it anyway, just
+ * to be tidy.
+ */
+ parallel_scan = scan->rs_parallel;
+ SpinLockAcquire(&parallel_scan->phs_mutex);
+ parallel_scan->phs_cblock = parallel_scan->phs_startblock;
+ SpinLockRelease(&parallel_scan->phs_mutex);
+ }
}
+/* ----------------
+ * heap_rescan_set_params - restart a relation scan after changing params
+ *
+ * This call allows changing the buffer strategy, syncscan, and pagemode
+ * options before starting a fresh scan. Note that although the actual use
+ * of syncscan might change (effectively, enabling or disabling reporting),
+ * the previously selected startblock will be kept.
+ * ----------------
+ */
+void
+heap_rescan_set_params(HeapScanDesc scan, ScanKey key,
+ bool allow_strat, bool allow_sync, bool allow_pagemode)
+{
+ /* adjust parameters */
+ scan->rs_allow_strat = allow_strat;
+ scan->rs_allow_sync = allow_sync;
+ scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(scan->rs_snapshot);
+ /* ... and rescan */
+ heap_rescan(scan, key);
+}
+
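Routing the parameter change through heap_rescan() keeps a single restart path; only the option flags differ. A stand-alone sketch of that shape, with a stub descriptor instead of the backend's HeapScanDesc (names here are illustrative only):

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	bool allow_strat;
	bool allow_sync;
	bool pageatatime;
	long startblock;            /* preserved across rescans, as noted above */
} ScanStub;

static void rescan(ScanStub *scan)
{
	/* reinitialize everything except startblock */
	printf("rescan from block %ld\n", scan->startblock);
}

static void rescan_set_params(ScanStub *scan, bool strat, bool sync, bool page)
{
	/* adjust parameters, then reuse the ordinary restart path */
	scan->allow_strat = strat;
	scan->allow_sync = sync;
	scan->pageatatime = page;
	rescan(scan);
}

int main(void)
{
	ScanStub s = {true, true, true, 10};

	rescan_set_params(&s, true, false, true);  /* disable syncscan only */
	return 0;
}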
/* ----------------
* heap_endscan - end relation scan
*
RelationIsAccessibleInLogicalDecoding(relation))
OldestXmin = RecentGlobalXmin;
else
- OldestXmin = RecentGlobalDataXmin;
+ OldestXmin =
+ TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
+ relation);
- Assert(TransactionIdIsValid(OldestXmin));
+ if (!TransactionIdIsValid(OldestXmin))
+ return;
/*
* Let's see if we really need pruning.
top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
- OBJS = barrierdesc.o brindesc.o clogdesc.o committsdesc.o dbasedesc.o gindesc.o gistdesc.o \
- hashdesc.o heapdesc.o mxactdesc.o nbtdesc.o relmapdesc.o \
- replorigindesc.o seqdesc.o smgrdesc.o spgdesc.o \
- standbydesc.o tblspcdesc.o xactdesc.o xlogdesc.o
-OBJS = brindesc.o clogdesc.o committsdesc.o dbasedesc.o genericdesc.o \
++OBJS = barrierdesc.o brindesc.o clogdesc.o committsdesc.o dbasedesc.o genericdesc.o \
+ gindesc.o gistdesc.o hashdesc.o heapdesc.o logicalmsgdesc.o \
+ mxactdesc.o nbtdesc.o relmapdesc.o replorigindesc.o seqdesc.o \
+ smgrdesc.o spgdesc.o standbydesc.o tblspcdesc.o xactdesc.o xlogdesc.o
include $(top_srcdir)/src/backend/common.mk
else if (info == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *) rec;
- char *path = relpathperm(xlrec->rnode, MAIN_FORKNUM);
+ char *path = relpathperm_client(xlrec->rnode, MAIN_FORKNUM, "");
- appendStringInfo(buf, "%s to %u blocks", path, xlrec->blkno);
+ appendStringInfo(buf, "%s to %u blocks flags %d", path,
+ xlrec->blkno, xlrec->flags);
pfree(path);
}
}
top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
- OBJS = clog.o commit_ts.o multixact.o parallel.o rmgr.o slru.o subtrans.o \
- timeline.o transam.o twophase.o twophase_rmgr.o varsup.o \
+ OBJS = clog.o commit_ts.o generic_xlog.o multixact.o parallel.o rmgr.o slru.o \
+ subtrans.o timeline.o transam.o twophase.o twophase_rmgr.o varsup.o \
xact.o xlog.o xlogarchive.o xlogfuncs.o \
- xloginsert.o xlogreader.o xlogutils.o
+ xloginsert.o xlogreader.o xlogutils.o gtm.o
include $(top_srcdir)/src/backend/common.mk
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/backend/access/transam/clog.c
*
#include "commands/dbcommands_xlog.h"
#include "commands/sequence.h"
#include "commands/tablespace.h"
+#ifdef PGXC
+#include "pgxc/barrier.h"
+#endif
+ #include "replication/message.h"
#include "replication/origin.h"
#include "storage/standby.h"
#include "utils/relmapper.h"
* data across crashes. During database startup, we simply force the
* currently-active page of SUBTRANS to zeroes.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/backend/access/transam/subtrans.c
*
* twophase.c
* Two-phase commit support functions.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* IDENTIFICATION
* src/backend/access/transam/twophase.c
*
* typedef struct GlobalTransactionData *GlobalTransaction appears in
* twophase.h
+ *
+ * Note that the max value of GIDSIZE must fit in the uint16 gidlen,
+ * specified in TwoPhaseFileHeader.
*/
-#define GIDSIZE 200
+#define GIDSIZE (200 + (MAX_COORDINATORS + MAX_DATANODES) * 15)
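The new definition scales with cluster size, which is why the uint16 note above matters. A quick self-contained check, using hypothetical node limits (the real MAX_COORDINATORS and MAX_DATANODES come from the Postgres-XL headers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_COORDINATORS 16     /* hypothetical value */
#define MAX_DATANODES    16     /* hypothetical value */
#define GIDSIZE (200 + (MAX_COORDINATORS + MAX_DATANODES) * 15)

int main(void)
{
	/* GIDSIZE must fit in the uint16 gidlen of TwoPhaseFileHeader */
	assert(GIDSIZE <= UINT16_MAX);
	printf("GIDSIZE = %d (limit %u)\n", GIDSIZE, (unsigned) UINT16_MAX);
	return 0;                   /* prints GIDSIZE = 680 for these values */
}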
typedef struct GlobalTransactionData
{
* varsup.c
* postgres OID & XID variables support routines
*
- * Copyright (c) 2000-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ * Copyright (c) 2000-2016, PostgreSQL Global Development Group
*
* IDENTIFICATION
* src/backend/access/transam/varsup.c
*
* See src/backend/access/transam/README for more information.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
#include "catalog/pg_database.h"
#include "commands/tablespace.h"
#include "miscadmin.h"
+#ifdef PGXC
+#include "pgxc/barrier.h"
+#endif
#include "pgstat.h"
#include "postmaster/bgwriter.h"
+ #include "postmaster/walwriter.h"
#include "postmaster/startup.h"
#include "replication/basebackup.h"
#include "replication/logical.h"
#include <unistd.h>
-
+#include "miscadmin.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xlogutils.h"
* routines to support running postgres in 'bootstrap' mode
* bootstrap mode is used to create the initial template database
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* IDENTIFICATION
* src/backend/bootstrap/bootstrap.c
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "pg_getopt.h"
+ #include "pgstat.h"
#include "postmaster/bgwriter.h"
+#include "postmaster/clustermon.h"
#include "postmaster/startup.h"
#include "postmaster/walwriter.h"
#include "replication/walreceiver.h"
pg_ts_config.h pg_ts_config_map.h pg_ts_dict.h \
pg_ts_parser.h pg_ts_template.h pg_extension.h \
pg_foreign_data_wrapper.h pg_foreign_server.h pg_user_mapping.h \
+ pgxc_class.h pgxc_node.h pgxc_group.h \
pg_foreign_table.h pg_policy.h pg_replication_origin.h \
- pg_default_acl.h pg_seclabel.h pg_shseclabel.h \
+ pg_default_acl.h pg_init_privs.h pg_seclabel.h pg_shseclabel.h \
pg_collation.h pg_range.h pg_transform.h \
toasting.h indexing.h \
)
* bits of hard-wired knowledge
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
switch (relpersistence)
{
case RELPERSISTENCE_TEMP:
- backend = MyBackendId;
+#ifdef XCP
+ if (OidIsValid(MyCoordId))
+ backend = MyFirstBackendId;
+ else
+#endif
+ backend = BackendIdForTempRelations();
break;
case RELPERSISTENCE_UNLOGGED:
case RELPERSISTENCE_PERMANENT:
* Routines to support inter-object dependencies.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* IDENTIFICATION
* src/backend/catalog/dependency.c
UserMappingRelationId, /* OCLASS_USER_MAPPING */
DefaultAclRelationId, /* OCLASS_DEFACL */
ExtensionRelationId, /* OCLASS_EXTENSION */
+#ifdef PGXC
+ PgxcClassRelationId, /* OCLASS_PGXCCLASS */
+#endif
EventTriggerRelationId, /* OCLASS_EVENT_TRIGGER */
- PolicyRelationId /* OCLASS_POLICY */
+ PolicyRelationId, /* OCLASS_POLICY */
+ TransformRelationId /* OCLASS_TRANSFORM */
};
* heap.c
* code to create and destroy POSTGRES heap relations
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
* and implementing search-path-controlled searches.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* pg_aggregate.c
* routines to support manipulation of the pg_aggregate relation
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
List *parameterDefaults,
Oid variadicArgType,
List *aggtransfnName,
+#ifdef PGXC
+ List *aggcollectfnName,
+#endif
List *aggfinalfnName,
+ List *aggcombinefnName,
+ List *aggserialfnName,
+ List *aggdeserialfnName,
List *aggmtransfnName,
List *aggminvtransfnName,
List *aggmfinalfnName,
Oid aggmTransType,
int32 aggmTransSpace,
const char *agginitval,
- const char *aggminitval)
+#ifdef PGXC
+ const char *agginitcollect,
+#endif
+ const char *aggminitval,
+ char proparallel)
{
Relation aggdesc;
HeapTuple tup;
Datum values[Natts_pg_aggregate];
Form_pg_proc proc;
Oid transfn;
+#ifdef PGXC
+ Oid collectfn = InvalidOid; /* can be omitted */
+#endif
Oid finalfn = InvalidOid; /* can be omitted */
+ Oid combinefn = InvalidOid; /* can be omitted */
+ Oid serialfn = InvalidOid; /* can be omitted */
+ Oid deserialfn = InvalidOid; /* can be omitted */
Oid mtransfn = InvalidOid; /* can be omitted */
Oid minvtransfn = InvalidOid; /* can be omitted */
Oid mfinalfn = InvalidOid; /* can be omitted */
* pg_proc.c
* routines to support manipulation of the pg_proc relation
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* storage.c
* code to create and destroy physical storage for relations
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
switch (relpersistence)
{
case RELPERSISTENCE_TEMP:
+#ifdef XCP
+ if (OidIsValid(MyCoordId))
+ backend = MyFirstBackendId;
+ else
+#endif
+ backend = BackendIdForTempRelations();
needs_wal = false;
break;
case RELPERSISTENCE_UNLOGGED:
*
* Routines for aggregate-manipulation commands
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
initval = defGetString(defel);
else if (pg_strcasecmp(defel->defname, "initcond1") == 0)
initval = defGetString(defel);
+#ifdef PGXC
+ else if (pg_strcasecmp(defel->defname, "cfunc") == 0)
+ collectfuncName = defGetQualifiedName(defel);
+ else if (pg_strcasecmp(defel->defname, "initcollect") == 0)
+ initcollect = defGetString(defel);
+#endif
else if (pg_strcasecmp(defel->defname, "minitcond") == 0)
minitval = defGetString(defel);
+ else if (pg_strcasecmp(defel->defname, "parallel") == 0)
+ parallel = defGetString(defel);
else
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
format_type_be(transTypeId))));
}
+#ifdef XCP
+ /*
+ * Look up the aggregate's collecttype.
+ *
+ * The collecttype is subject to the same restrictions as the transtype.
+ */
+ if (collectType)
+ {
+ collectTypeId = typenameTypeId(NULL, collectType);
+ if (get_typtype(collectTypeId) == TYPTYPE_PSEUDO &&
+ !IsPolymorphicType(collectTypeId))
+ {
+ if (collectTypeId == INTERNALOID && superuser())
+ /* okay */ ;
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("aggregate collection data type cannot be %s",
+ format_type_be(collectTypeId))));
+ }
+ }
+ else
+ collectTypeId = InvalidOid;
+#endif
+ if (serialfuncName && deserialfuncName)
+ {
+ /*
+ * Serialization is only needed/allowed for transtype INTERNAL.
+ */
+ if (transTypeId != INTERNALOID)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("serialization functions may be specified only when the aggregate transition data type is %s",
+ format_type_be(INTERNALOID))));
+ }
+ else if (serialfuncName || deserialfuncName)
+ {
+ /*
+ * Cannot specify one function without the other.
+ */
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("must specify both or neither of serialization and deserialization functions")));
+ }
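The restriction exists because an INTERNAL transition state is an in-memory object that cannot cross a process boundary as-is; the function pair flattens it to bytes and rebuilds it on the other side. A stand-alone sketch of the round trip (a pointer-free struct for simplicity; real INTERNAL states may hold pointers and need genuine packing):

#include <stdio.h>
#include <string.h>

typedef struct { long sum; long count; } InternalState;

static size_t serialize(const InternalState *s, unsigned char *buf)
{
	memcpy(buf, s, sizeof(*s));     /* flatten to a byte string */
	return sizeof(*s);
}

static void deserialize(const unsigned char *buf, InternalState *s)
{
	memcpy(s, buf, sizeof(*s));     /* rebuild the in-memory state */
}

int main(void)
{
	InternalState a = {42, 7};
	InternalState b;
	unsigned char wire[sizeof(InternalState)];

	serialize(&a, wire);            /* sending side */
	deserialize(wire, &b);          /* receiving side */
	printf("%ld %ld\n", b.sum, b.count);    /* 42 7 */
	return 0;
}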
/*
* If a moving-aggregate transtype is specified, look that up. Same
parameterDefaults,
variadicArgType,
transfuncName, /* step function name */
+#ifdef PGXC
+ collectfuncName, /* collect function name */
+#endif
finalfuncName, /* final function name */
+ combinefuncName, /* combine function name */
+ serialfuncName, /* serial function name */
+ deserialfuncName, /* deserial function name */
mtransfuncName, /* fwd trans function name */
minvtransfuncName, /* inv trans function name */
mfinalfuncName, /* final function name */
mtransTypeId, /* transition data type */
mtransSpace, /* transition space */
initval, /* initial condition */
- minitval); /* initial condition */
+#ifdef PGXC
+ initcollect, /* initial condition for collection function */
+#endif
+ minitval, /* initial condition */
+ proparallel); /* parallel safe? */
}
* analyze.c
* the Postgres statistics generator
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
*
* PostgreSQL object comments utility code.
*
- * Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ * Copyright (c) 1996-2016, PostgreSQL Global Development Group
*
* IDENTIFICATION
* src/backend/commands/comment.c
* copy.c
* Implements the COPY utility command
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include "access/sysattr.h"
#include "access/xact.h"
#include "access/xlog.h"
- #include "catalog/namespace.h"
#include "catalog/pg_type.h"
+#ifdef XCP
+#include "catalog/dependency.h"
+#include "commands/sequence.h"
+#endif
#include "commands/copy.h"
#include "commands/defrem.h"
#include "commands/trigger.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "optimizer/planner.h"
- #include "parser/parse_relation.h"
+#ifdef PGXC
+#include "pgxc/pgxc.h"
+#include "pgxc/execRemote.h"
+#include "pgxc/locator.h"
+#include "pgxc/remotecopy.h"
+#include "nodes/nodes.h"
+#include "pgxc/poolmgr.h"
+#include "catalog/pgxc_node.h"
+#endif
#include "nodes/makefuncs.h"
+#include "optimizer/pgxcship.h"
#include "rewrite/rewriteHandler.h"
#include "storage/fd.h"
#include "tcop/tcopprot.h"
char *raw_buf;
int raw_buf_index; /* next byte to process */
int raw_buf_len; /* total # of bytes stored */
+#ifdef PGXC
+ /* Remote COPY state data */
+ RemoteCopyData *remoteCopyState;
+#endif
} CopyStateData;
- /* DestReceiver for COPY (SELECT) TO */
+ /* DestReceiver for COPY (query) TO */
typedef struct
{
DestReceiver pub; /* publicly-known function pointers */
case OCLASS_USER_MAPPING:
case OCLASS_DEFACL:
case OCLASS_EXTENSION:
+#ifdef PGXC
+ case OCLASS_PGXC_CLASS:
+ case OCLASS_PGXC_NODE:
+ case OCLASS_PGXC_GROUP:
+#endif
case OCLASS_POLICY:
+ case OCLASS_AM:
return true;
-
- case MAX_OCLASS:
-
- /*
- * This shouldn't ever happen, but we keep the case to avoid a
- * compiler warning without a "default" clause in the switch.
- */
- Assert(false);
- break;
}
return true;
* explain.c
* Explain query execution plans
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
case T_WorkTableScan:
pname = sname = "WorkTable Scan";
break;
+#ifdef PGXC
+ case T_RemoteQuery:
+ pname = "Remote Fast Query Execution";
+ break;
+#endif
case T_ForeignScan:
- pname = sname = "Foreign Scan";
+ sname = "Foreign Scan";
+ switch (((ForeignScan *) plan)->operation)
+ {
+ case CMD_SELECT:
+ pname = "Foreign Scan";
+ operation = "Select";
+ break;
+ case CMD_INSERT:
+ pname = "Foreign Insert";
+ operation = "Insert";
+ break;
+ case CMD_UPDATE:
+ pname = "Foreign Update";
+ operation = "Update";
+ break;
+ case CMD_DELETE:
+ pname = "Foreign Delete";
+ operation = "Delete";
+ break;
+ default:
+ pname = "???";
+ break;
+ }
break;
+#ifdef XCP
+ case T_RemoteSubplan:
+ pname = sname = "Remote Subquery Scan";
+ break;
+#endif /* XCP */
case T_CustomScan:
sname = "Custom Scan";
custom_name = ((CustomScan *) plan)->methods->CustomName;
pname = sname = "Group";
break;
case T_Agg:
- sname = "Aggregate";
- switch (((Agg *) plan)->aggstrategy)
{
- case AGG_PLAIN:
- pname = "Aggregate";
- strategy = "Plain";
- break;
- case AGG_SORTED:
- pname = "GroupAggregate";
- strategy = "Sorted";
- break;
- case AGG_HASHED:
- pname = "HashAggregate";
- strategy = "Hashed";
- break;
- default:
- pname = "Aggregate ???";
- strategy = "???";
- break;
+ Agg *agg = (Agg *) plan;
+
+ sname = "Aggregate";
+ switch (agg->aggstrategy)
+ {
+ case AGG_PLAIN:
+ pname = "Aggregate";
+ strategy = "Plain";
+ break;
+ case AGG_SORTED:
+ pname = "GroupAggregate";
+ strategy = "Sorted";
+ break;
+ case AGG_HASHED:
+ pname = "HashAggregate";
+ strategy = "Hashed";
+ break;
+ default:
+ pname = "Aggregate ???";
+ strategy = "???";
+ break;
+ }
+
+ if (DO_AGGSPLIT_SKIPFINAL(agg->aggsplit))
+ {
+ partialmode = "Partial";
+ pname = psprintf("%s %s", partialmode, pname);
+ }
+ else if (DO_AGGSPLIT_COMBINE(agg->aggsplit))
+ {
+ partialmode = "Finalize";
+ pname = psprintf("%s %s", partialmode, pname);
+ }
+ else
+ partialmode = "Simple";
}
+#ifdef XCP
+ switch (((Agg *) plan)->aggstrategy)
+ {
+ case AGG_SLAVE:
+ operation = "Transition";
+ break;
+ case AGG_MASTER:
+ operation = "Collection";
+ break;
+ default:
+ operation = NULL;
+ break;
+ }
+#endif
+
break;
case T_WindowAgg:
pname = sname = "WindowAgg";
* indexcmds.c
* POSTGRES define and remove index code.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
* storage management for portals (but doesn't run any queries in them).
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* schemacmds.c
* schema creation/manipulation commands
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* sequence.c
* PostgreSQL sequences support code.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
* tablecmds.c
* Commands for creating and altering table structures and settings
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
static void ATExecDropOf(Relation rel, LOCKMODE lockmode);
static void ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode);
static void ATExecGenericOptions(Relation rel, List *options);
+#ifdef PGXC
+static void AtExecDistributeBy(Relation rel, DistributeBy *options);
+static void AtExecSubCluster(Relation rel, PGXCSubCluster *options);
+static void AtExecAddNode(Relation rel, List *options);
+static void AtExecDeleteNode(Relation rel, List *options);
+static void ATCheckCmd(Relation rel, AlterTableCmd *cmd);
+static RedistribState *BuildRedistribCommands(Oid relid, List *subCmds);
+static Oid *delete_node_list(Oid *old_oids, int old_num, Oid *del_oids, int del_num, int *new_num);
+static Oid *add_node_list(Oid *old_oids, int old_num, Oid *add_oids, int add_num, int *new_num);
+#endif
static void ATExecEnableRowSecurity(Relation rel);
static void ATExecDisableRowSecurity(Relation rel);
+ static void ATExecForceNoForceRowSecurity(Relation rel, bool force_rls);
static void copy_relation_data(SMgrRelation rel, SMgrRelation dst,
ForkNumber forkNum, char relpersistence);
* trigger.c
* PostgreSQL TRIGGERs support code.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* in cluster.c.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
* Routines for handling specialized SET variables.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* view.c
* use rewrite rules to construct views
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
nodeSamplescan.o nodeSeqscan.o nodeSetOp.o nodeSort.o nodeUnique.o \
nodeValuesscan.o nodeCtescan.o nodeWorktablescan.o \
nodeGroup.o nodeSubplan.o nodeSubqueryscan.o nodeTidscan.o \
- nodeForeignscan.o nodeWindowAgg.o producerReceiver.o tstoreReceiver.o spi.o
- nodeForeignscan.o nodeWindowAgg.o tstoreReceiver.o tqueue.o spi.o
++ nodeForeignscan.o nodeWindowAgg.o producerReceiver.o tstoreReceiver.o tqueue.o spi.o
include $(top_srcdir)/src/backend/common.mk
* execAmi.c
* miscellaneous executor access method routines
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/backend/executor/execAmi.c
* execCurrent.c
* executor support for WHERE CURRENT OF cursor
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/backend/executor/execCurrent.c
* before ExecutorEnd. This can be omitted only in case of EXPLAIN,
* which should also omit ExecutorRun.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* ExecProcNode, or ExecEndNode on its subnodes and do the appropriate
* processing.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include "executor/nodeValuesscan.h"
#include "executor/nodeWindowAgg.h"
#include "executor/nodeWorktablescan.h"
+ #include "nodes/nodeFuncs.h"
#include "miscadmin.h"
-
+#ifdef PGXC
+#include "pgxc/execRemote.h"
+#endif
/* ------------------------------------------------------------------------
* ExecInitNode
* This information is needed by routines manipulating tuples
* (getattribute, formtuple, etc.).
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include "nodes/nodeFuncs.h"
#include "storage/bufmgr.h"
#include "utils/builtins.h"
- #include "utils/expandeddatum.h"
#include "utils/lsyscache.h"
#include "utils/typcache.h"
-
+#ifdef XCP
+#include "pgxc/pgxc.h"
+#include "utils/memutils.h"
+#endif
static TupleDesc ExecTypeFromTLInternal(List *targetList,
bool hasoid, bool skipjunk);
* execUtils.c
* miscellaneous executor utility routines
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
*
* TODO: AGG_HASHED doesn't support multiple grouping sets yet.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
*/
int numTransInputs;
- /*
- * Number of arguments to pass to the finalfn. This is always at least 1
- * (the transition state value) plus any ordered-set direct args. If the
- * finalfn wants extra args then we pass nulls corresponding to the
- * aggregated input columns.
- */
- int numFinalArgs;
-
- /* Oids of transfer functions */
+ /* Oid of the state transition or combine function */
Oid transfn_oid;
+ Oid finalfn_oid; /* may be InvalidOid */
+#ifdef PGXC
+ Oid collectfn_oid; /* may be InvalidOid */
+#endif /* PGXC */
+ /* Oid of the serialization function or InvalidOid */
+ Oid serialfn_oid;
+
+ /* Oid of the deserialization function or InvalidOid */
+ Oid deserialfn_oid;
+
+ /* Oid of state value's datatype */
+ Oid aggtranstype;
+
+ /* ExprStates of the FILTER and argument expressions. */
+ ExprState *aggfilter; /* state of FILTER expression, if any */
+ List *args; /* states of aggregated-argument expressions */
+ List *aggdirectargs; /* states of direct-argument expressions */
+
/*
- * fmgr lookup data for transfer functions --- only valid when
- * corresponding oid is not InvalidOid. Note in particular that fn_strict
- * flags are kept here.
+ * fmgr lookup data for transition function or combine function. Note in
+ * particular that the fn_strict flag is kept here.
*/
FmgrInfo transfn;
+ FmgrInfo finalfn;
+#ifdef PGXC
+ FmgrInfo collectfn;
+#endif /* PGXC */
+ /* fmgr lookup data for serialization function */
+ FmgrInfo serialfn;
+
+ /* fmgr lookup data for deserialization function */
+ FmgrInfo deserialfn;
+
/* Input collation derived for aggregate */
Oid aggCollation;
*/
Datum initValue;
bool initValueIsNull;
+#ifdef PGXC
+ Datum initCollectValue;
+ bool initCollectValueIsNull;
+#endif /* PGXC */
/*
- * We need the len and byval info for the agg's input, result, and
- * transition data types in order to know how to copy/delete values.
+ * We need the len and byval info for the agg's input and transition data
+ * types in order to know how to copy/delete values.
*
* Note that the info for the input type is used only when handling
* DISTINCT aggs with just one argument, so there is only one input type.
* aggregates like max() and min().) The noTransValue flag signals that we
* still need to do this.
*/
+ pergroupstate->noTransValue = peraggstate->initValueIsNull;
+
+#ifdef PGXC
+ /*
+ * (Re)set collectValue to the initial value.
+ *
+ * Note that when the initial value is pass-by-ref, we must copy it
+ * (into the aggcontext) since we will pfree the collectValue later.
+ * The collection type is the same as the transition type.
+ */
+ if (OidIsValid(peraggstate->collectfn_oid))
+ {
+ if (peraggstate->initCollectValueIsNull)
+ pergroupstate->collectValue = peraggstate->initCollectValue;
+ else
+ {
+ MemoryContext oldContext;
+
+ oldContext = MemoryContextSwitchTo(
+ aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
+ pergroupstate->collectValue = datumCopy(peraggstate->initCollectValue,
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
+ MemoryContextSwitchTo(oldContext);
+ }
+ pergroupstate->collectValueIsNull = peraggstate->initCollectValueIsNull;
+
+ /*
+ * If the initial value for the transition state doesn't exist in the
+ * pg_aggregate table then we will let the first non-NULL value
+ * returned from the outer procNode become the initial value. (This is
+ * useful for aggregates like max() and min().) The noTransValue flag
+ * signals that we still need to do this.
+ */
+ pergroupstate->noCollectValue = peraggstate->initCollectValueIsNull;
+ }
+#endif /* PGXC */
+ pergroupstate->noTransValue = pertrans->initValueIsNull;
}
/*
FunctionCallInfoData fcinfo;
bool anynull = false;
MemoryContext oldContext;
+#ifdef XCP
+ Datum value;
+ bool isnull;
+#endif
int i;
ListCell *lc;
+ AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
+#ifdef XCP
+ if (OidIsValid(peraggstate->collectfn_oid))
+ {
+ FunctionCallInfoData fcinfo;
+ InitFunctionCallInfoData(fcinfo, &(peraggstate->collectfn), 2,
+ peraggstate->aggCollation,
+ (void *) aggstate, NULL);
+ fcinfo.arg[1] = pergroupstate->transValue;
+ fcinfo.argnull[1] = pergroupstate->transValueIsNull;
+ if (fcinfo.flinfo->fn_strict &&
+ (peraggstate->initCollectValueIsNull || pergroupstate->transValueIsNull))
+ {
+ /*
+ * We have already checked the collection and transition types are
+ * binary compatible, so we can just copy the value.
+ */
+ value = pergroupstate->transValue;
+ isnull = pergroupstate->transValueIsNull;
+ }
+ else
+ {
+ /*
+ * copy the initial datum since it might get changed inside the
+ * collection function
+ */
+ fcinfo.argnull[0] = peraggstate->initCollectValueIsNull;
+ fcinfo.arg[0] = (Datum) NULL;
+ if (!fcinfo.argnull[0])
+ {
+ fcinfo.arg[0] = datumCopy(peraggstate->initCollectValue,
+ peraggstate->collecttypeByVal,
+ peraggstate->collecttypeLen);
+ }
+ value = FunctionCallInvoke(&fcinfo);
+ isnull = fcinfo.isnull;
+ }
+ }
+ else
+ {
+ /* No collect function, just use transition values to finalize */
+ value = pergroupstate->transValue;
+ isnull = pergroupstate->transValueIsNull;
+ }
+#endif /* XCP */
/*
* Evaluate any direct arguments. We do this even if there's no finalfn
/*
* Apply the agg's finalfn if one is provided, else return transValue.
*/
- if (OidIsValid(peraggstate->finalfn_oid))
+ if (OidIsValid(peragg->finalfn_oid))
{
- int numFinalArgs = peraggstate->numFinalArgs;
+ int numFinalArgs = peragg->numFinalArgs;
- /* set up aggstate->curperagg for AggGetAggref() */
- aggstate->curperagg = peraggstate;
+ /* set up aggstate->curpertrans for AggGetAggref() */
+ aggstate->curpertrans = pertrans;
- InitFunctionCallInfoData(fcinfo, &peraggstate->finalfn,
+ InitFunctionCallInfoData(fcinfo, &peragg->finalfn,
numFinalArgs,
- peraggstate->aggCollation,
+ pertrans->aggCollation,
(void *) aggstate, NULL);
-
- /* Fill in the transition state value */
+#ifdef XCP
+ fcinfo.arg[0] = value;
+ fcinfo.argnull[0] = isnull;
+#else
fcinfo.arg[0] = pergroupstate->transValue;
fcinfo.argnull[0] = pergroupstate->transValueIsNull;
+#endif /* XCP */
+
anynull |= pergroupstate->transValueIsNull;
/* Fill any remaining argument positions with nulls */
Oid inputTypes[FUNC_MAX_ARGS];
int numArguments;
int numDirectArgs;
- int numInputs;
- int numSortCols;
- int numDistinctCols;
- List *sortlist;
HeapTuple aggTuple;
Form_pg_aggregate aggform;
+ Oid aggtranstype;
+#ifdef XCP
+ Oid aggcollecttype;
+#endif /* XCP */
AclResult aclresult;
Oid transfn_oid,
finalfn_oid;
- Expr *finalfnexpr;
- Oid aggtranstype;
+#ifdef PGXC
+ Oid collectfn_oid;
+ Expr *collectfnexpr;
+#endif /* PGXC */
+ Expr *transfnexpr,
+ *finalfnexpr;
+ Oid serialfn_oid,
+ deserialfn_oid;
Datum textInitVal;
- int i;
- ListCell *lc;
+ Datum initValue;
+ bool initValueIsNull;
/* Planner should have assigned aggregate to correct level */
Assert(aggref->agglevelsup == 0);
get_func_name(aggref->aggfnoid));
InvokeFunctionExecuteHook(aggref->aggfnoid);
+ peraggstate->transfn_oid = transfn_oid = aggform->aggtransfn;
+ peraggstate->finalfn_oid = finalfn_oid = aggform->aggfinalfn;
+#ifdef PGXC
+ peraggstate->collectfn_oid = collectfn_oid = aggform->aggcollectfn;
+ /*
+ * If preparing PHASE1, skip the finalization step and return the
+ * transition value, to be collected and finalized on the master node.
+ * If preparing PHASE2, move the collection function into the transition
+ * slot, so the master node collects the transition values and finalizes
+ * them. Otherwise (one-node aggregation) do all steps locally; the
+ * collection function just converts the transient value for the
+ * finalization function.
+ */
+ if (node->aggdistribution == AGG_SLAVE)
+ {
+ peraggstate->collectfn_oid = collectfn_oid = InvalidOid;
+ peraggstate->finalfn_oid = finalfn_oid = InvalidOid;
+ }
+ else if (node->aggdistribution == AGG_MASTER)
+ {
+ peraggstate->transfn_oid = transfn_oid = collectfn_oid;
+ peraggstate->collectfn_oid = collectfn_oid = InvalidOid;
+
+ /*
+ * Tuples should only be filtered on the datanodes when coordinator
+ * is doing collection and finalisation
+ */
+ aggref->aggfilter = NULL;
+ aggrefstate->aggfilter = NULL;
+ }
+#endif /* PGXC */
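A stand-alone sketch of the PHASE1/PHASE2 split described above, using a simple average with stub functions (not the fmgr-based signatures the executor actually calls): each datanode runs only the transition function, and the master collects the shipped transition values and finalizes.

#include <stdio.h>

typedef struct { long sum; long count; } TransValue;

static void transfn(TransValue *t, long row) { t->sum += row; t->count++; }

static void collectfn(TransValue *c, const TransValue *t)
{
	c->sum += t->sum;               /* merge one datanode's transition value */
	c->count += t->count;
}

static double finalfn(const TransValue *c) { return (double) c->sum / c->count; }

int main(void)
{
	TransValue dn1 = {0, 0}, dn2 = {0, 0}, coord = {0, 0};

	/* PHASE1, on each datanode: transition only, no finalization */
	transfn(&dn1, 1); transfn(&dn1, 2);
	transfn(&dn2, 3);

	/* PHASE2, on the master: collect the transition values, then finalize */
	collectfn(&coord, &dn1);
	collectfn(&coord, &dn2);
	printf("avg = %g\n", finalfn(&coord));  /* 2 */
	return 0;
}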
+ /* planner recorded transition state type in the Aggref itself */
+ aggtranstype = aggref->aggtranstype;
+ Assert(OidIsValid(aggtranstype));
+
+ /*
+ * If this aggregation is performing state combines, then instead of
+ * using the transition function, we'll use the combine function
+ */
+ if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
+ {
+ transfn_oid = aggform->aggcombinefn;
+
+ /* If not set then the planner messed up */
+ if (!OidIsValid(transfn_oid))
+ elog(ERROR, "combinefn not set for aggregate function");
+ }
+ else
+ transfn_oid = aggform->aggtransfn;
+
+ /* Final function only required if we're finalizing the aggregates */
+ if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
+ peragg->finalfn_oid = finalfn_oid = InvalidOid;
+ else
+ peragg->finalfn_oid = finalfn_oid = aggform->aggfinalfn;
+
+ serialfn_oid = InvalidOid;
+ deserialfn_oid = InvalidOid;
+
+ /*
+ * Check if serialization/deserialization is required. We only do it
+ * for aggregates that have transtype INTERNAL.
+ */
+ if (aggtranstype == INTERNALOID)
+ {
+ /*
+ * The planner should only have generated a serialize agg node if
+ * every aggregate with an INTERNAL state has a serialization
+ * function. Verify that.
+ */
+ if (DO_AGGSPLIT_SERIALIZE(aggstate->aggsplit))
+ {
+ /* serialization only valid when not running finalfn */
+ Assert(DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+
+ if (!OidIsValid(aggform->aggserialfn))
+ elog(ERROR, "serialfunc not provided for serialization aggregation");
+ serialfn_oid = aggform->aggserialfn;
+ }
+
+ /* Likewise for deserialization functions */
+ if (DO_AGGSPLIT_DESERIALIZE(aggstate->aggsplit))
+ {
+ /* deserialization only valid when combining states */
+ Assert(DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
+
+ if (!OidIsValid(aggform->aggdeserialfn))
+ elog(ERROR, "deserialfunc not provided for deserialization aggregation");
+ deserialfn_oid = aggform->aggdeserialfn;
+ }
+ }
+
/* Check that aggregate owner has permission to call component fns */
{
HeapTuple procTuple;
get_func_name(finalfn_oid));
InvokeFunctionExecuteHook(finalfn_oid);
}
+
+#ifdef PGXC
+ if (OidIsValid(collectfn_oid))
+ {
+ aclresult = pg_proc_aclcheck(collectfn_oid, aggOwner,
+ ACL_EXECUTE);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_PROC,
+ get_func_name(collectfn_oid));
+ }
+#endif /* PGXC */
+ if (OidIsValid(serialfn_oid))
+ {
+ aclresult = pg_proc_aclcheck(serialfn_oid, aggOwner,
+ ACL_EXECUTE);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_PROC,
+ get_func_name(serialfn_oid));
+ InvokeFunctionExecuteHook(serialfn_oid);
+ }
+ if (OidIsValid(deserialfn_oid))
+ {
+ aclresult = pg_proc_aclcheck(deserialfn_oid, aggOwner,
+ ACL_EXECUTE);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_PROC,
+ get_func_name(deserialfn_oid));
+ InvokeFunctionExecuteHook(deserialfn_oid);
+ }
}
/*
/* Count the "direct" arguments, if any */
numDirectArgs = list_length(aggref->aggdirectargs);
- /* Count the number of aggregated input columns */
- numInputs = list_length(aggref->args);
- peraggstate->numInputs = numInputs;
-
- /* Detect how many arguments to pass to the transfn */
- if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
- peraggstate->numTransInputs = numInputs;
- else
- peraggstate->numTransInputs = numArguments;
-
/* Detect how many arguments to pass to the finalfn */
if (aggform->aggfinalextra)
- peraggstate->numFinalArgs = numArguments + 1;
+ peragg->numFinalArgs = numArguments + 1;
else
- peraggstate->numFinalArgs = numDirectArgs + 1;
+ peragg->numFinalArgs = numDirectArgs + 1;
+
+ /* resolve actual type of transition state, if polymorphic */
+#ifdef XCP
+ /*
+ * We substituted the collection function for PHASE2, so take the
+ * collection type as the transient type.
+ */
+ if (node->aggdistribution == AGG_MASTER)
+ aggtranstype = aggform->aggcollecttype;
+ else
+#endif /* XCP */
+ aggtranstype = resolve_aggregate_transtype(aggref->aggfnoid,
+ aggform->aggtranstype,
+ inputTypes,
+ numArguments);
+#ifdef XCP
+ /* get type of collection state, if defined */
+ if (OidIsValid(collectfn_oid))
+ aggcollecttype = aggform->aggcollecttype;
+ else
+ aggcollecttype = InvalidOid;
+#endif
+ /* build expression trees using actual argument & result types */
+ build_aggregate_fnexprs(inputTypes,
+ numArguments,
+ numDirectArgs,
+ peraggstate->numFinalArgs,
+ aggref->aggvariadic,
+ aggtranstype,
+#ifdef XCP
+ aggcollecttype,
+#endif
+ aggref->aggtype,
+ aggref->inputcollid,
+ transfn_oid,
+#ifdef XCP
+ collectfn_oid,
+#endif
+ InvalidOid, /* invtrans is not needed here */
+ finalfn_oid,
+ &transfnexpr,
+ NULL,
+#ifdef XCP
+ &collectfnexpr,
+#endif
+ &finalfnexpr);
+
+ /* set up infrastructure for calling the transfn and finalfn */
+ fmgr_info(transfn_oid, &peraggstate->transfn);
+ fmgr_info_set_expr((Node *) transfnexpr, &peraggstate->transfn);
+ /*
+ * build expression trees using actual argument & result types for the
+ * finalfn, if it exists and is required.
+ */
if (OidIsValid(finalfn_oid))
{
- fmgr_info(finalfn_oid, &peraggstate->finalfn);
- fmgr_info_set_expr((Node *) finalfnexpr, &peraggstate->finalfn);
+ build_aggregate_finalfn_expr(inputTypes,
+ peragg->numFinalArgs,
+ aggtranstype,
+ aggref->aggtype,
+ aggref->inputcollid,
+ finalfn_oid,
+ &finalfnexpr);
+ fmgr_info(finalfn_oid, &peragg->finalfn);
+ fmgr_info_set_expr((Node *) finalfnexpr, &peragg->finalfn);
}
+#ifdef PGXC
+ if (OidIsValid(collectfn_oid))
+ {
+ fmgr_info(collectfn_oid, &peraggstate->collectfn);
+ peraggstate->collectfn.fn_expr = (Node *)collectfnexpr;
+ }
+#endif /* PGXC */
+ peraggstate->aggCollation = aggref->inputcollid;
+
+ InitFunctionCallInfoData(peraggstate->transfn_fcinfo,
+ &peraggstate->transfn,
+ peraggstate->numTransInputs + 1,
+ peraggstate->aggCollation,
+ (void *) aggstate, NULL);
+
+ /* get info about relevant datatypes */
+ get_typlenbyval(aggref->aggtype,
+ &peraggstate->resulttypeLen,
+ &peraggstate->resulttypeByVal);
+ get_typlenbyval(aggtranstype,
+ &peraggstate->transtypeLen,
+ &peraggstate->transtypeByVal);
+#ifdef XCP
+ if (OidIsValid(aggcollecttype))
+ get_typlenbyval(aggcollecttype,
+ &peraggstate->collecttypeLen,
+ &peraggstate->collecttypeByVal);
+#endif /* XCP */
+ /* get info about the output value's datatype */
+ get_typlenbyval(aggref->aggtype,
+ &peragg->resulttypeLen,
+ &peragg->resulttypeByVal);
/*
* initval is potentially null, so don't try to access it as a struct
* field. Must do it the hard way with SysCacheGetAttr.
*/
+#ifdef XCP
+	/*
+	 * If this is PHASE2, get the collection initial value instead.
+	 */
+ if (node->aggdistribution == AGG_MASTER)
+ textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
+ Anum_pg_aggregate_agginitcollect,
+ &peraggstate->initValueIsNull);
+ else
+#endif /* XCP */
textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
Anum_pg_aggregate_agginitval,
- &peraggstate->initValueIsNull);
+ &initValueIsNull);
+ if (initValueIsNull)
+ initValue = (Datum) 0;
+ else
+ initValue = GetAggInitVal(textInitVal, aggtranstype);
- if (peraggstate->initValueIsNull)
- peraggstate->initValue = (Datum) 0;
+ /*
+ * 2. Build working state for invoking the transition function, or
+ * look up previously initialized working state, if we can share it.
+ *
+ * find_compatible_peragg() already collected a list of per-Trans's
+ * with the same inputs. Check if any of them have the same transition
+ * function and initial value.
+ */
+ existing_transno = find_compatible_pertrans(aggstate, aggref,
+ transfn_oid, aggtranstype,
+ serialfn_oid, deserialfn_oid,
+ initValue, initValueIsNull,
+ same_input_transnos);
+ if (existing_transno != -1)
+ {
+ /*
+ * Existing compatible trans found, so just point the 'peragg' to
+ * the same per-trans struct.
+ */
+ pertrans = &pertransstates[existing_transno];
+ peragg->transno = existing_transno;
+ }
else
- peraggstate->initValue = GetAggInitVal(textInitVal,
- aggtranstype);
+ {
+ pertrans = &pertransstates[++transno];
+ build_pertrans_for_aggref(pertrans, aggstate, estate,
+ aggref, transfn_oid, aggtranstype,
+ serialfn_oid, deserialfn_oid,
+ initValue, initValueIsNull,
+ inputTypes, numArguments);
+ peragg->transno = transno;
+ }
+ ReleaseSysCache(aggTuple);
+ }
+
+ /*
+ * Update numaggs to match the number of unique aggregates found. Also set
+ * numstates to the number of unique aggregate states found.
+ */
+ aggstate->numaggs = aggno + 1;
+ aggstate->numtrans = transno + 1;
+
+ return aggstate;
+ }
+
+ /*
+ * Build the state needed to calculate a state value for an aggregate.
+ *
+ * This initializes all the fields in 'pertrans'. 'aggref' is the aggregate
+ * to initialize the state for. 'aggtransfn', 'aggtranstype', and the rest
+ * of the arguments could be calculated from 'aggref', but the caller has
+ * calculated them already, so might as well pass them.
+ */
+ static void
+ build_pertrans_for_aggref(AggStatePerTrans pertrans,
+ AggState *aggstate, EState *estate,
+ Aggref *aggref,
+ Oid aggtransfn, Oid aggtranstype,
+ Oid aggserialfn, Oid aggdeserialfn,
+ Datum initValue, bool initValueIsNull,
+ Oid *inputTypes, int numArguments)
+ {
+ int numGroupingSets = Max(aggstate->maxsets, 1);
+ Expr *serialfnexpr = NULL;
+ Expr *deserialfnexpr = NULL;
+ ListCell *lc;
+ int numInputs;
+ int numDirectArgs;
+ List *sortlist;
+ int numSortCols;
+ int numDistinctCols;
+ int naggs;
+ int i;
+
+ /* Begin filling in the pertrans data */
+ pertrans->aggref = aggref;
+ pertrans->aggCollation = aggref->inputcollid;
+ pertrans->transfn_oid = aggtransfn;
+ pertrans->serialfn_oid = aggserialfn;
+ pertrans->deserialfn_oid = aggdeserialfn;
+ pertrans->initValue = initValue;
+ pertrans->initValueIsNull = initValueIsNull;
+
+ /* Count the "direct" arguments, if any */
+ numDirectArgs = list_length(aggref->aggdirectargs);
+
+ /* Count the number of aggregated input columns */
+ pertrans->numInputs = numInputs = list_length(aggref->args);
+
+ pertrans->aggtranstype = aggtranstype;
+
+ /* Detect how many arguments to pass to the transfn */
+ if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
+ pertrans->numTransInputs = numInputs;
+ else
+ pertrans->numTransInputs = numArguments;
+
+ /*
+ * When combining states, we have no use at all for the aggregate
+ * function's transfn. Instead we use the combinefn. In this case, the
+ * transfn and transfn_oid fields of pertrans refer to the combine
+ * function rather than the transition function.
+ */
+ if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
+ {
+ Expr *combinefnexpr;
+
+ build_aggregate_combinefn_expr(aggtranstype,
+ aggref->inputcollid,
+ aggtransfn,
+ &combinefnexpr);
+ fmgr_info(aggtransfn, &pertrans->transfn);
+ fmgr_info_set_expr((Node *) combinefnexpr, &pertrans->transfn);
+
+ InitFunctionCallInfoData(pertrans->transfn_fcinfo,
+ &pertrans->transfn,
+ 2,
+ pertrans->aggCollation,
+ (void *) aggstate, NULL);
+
+ /*
+ * Ensure that a combine function to combine INTERNAL states is not
+ * strict. This should have been checked during CREATE AGGREGATE, but
+ * the strict property could have been changed since then.
+ */
+ if (pertrans->transfn.fn_strict && aggtranstype == INTERNALOID)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("combine function for aggregate %u must be declared as STRICT",
+ aggref->aggfnoid)));
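+
+		/*
+		 * Worked example (illustration, not part of the patch): for
+		 * avg(int4) computed by two partial-aggregate workers, each worker
+		 * emits a {count,sum} transition state; this combining Agg then
+		 * invokes the combine function through the transfn slot set up
+		 * above, merging e.g. {2,30} and {3,45} into {5,75}, from which
+		 * the finalfn computes 15.
+		 */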
+ }
+ else
+ {
+ Expr *transfnexpr;
+
+ /*
+ * Set up infrastructure for calling the transfn. Note that invtrans
+ * is not needed here.
+ */
+ build_aggregate_transfn_expr(inputTypes,
+ numArguments,
+ numDirectArgs,
+ aggref->aggvariadic,
+ aggtranstype,
+ aggref->inputcollid,
+ aggtransfn,
+ InvalidOid,
+ &transfnexpr,
+ NULL);
+ fmgr_info(aggtransfn, &pertrans->transfn);
+ fmgr_info_set_expr((Node *) transfnexpr, &pertrans->transfn);
+
+ InitFunctionCallInfoData(pertrans->transfn_fcinfo,
+ &pertrans->transfn,
+ pertrans->numTransInputs + 1,
+ pertrans->aggCollation,
+ (void *) aggstate, NULL);
+#ifdef PGXC
+ /*
+ * initval for collection function is potentially null, so don't try to
+ * access it as a struct field. Must do it the hard way with
+ * SysCacheGetAttr.
+ */
+ if (OidIsValid(aggcollecttype))
+ {
+		textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
+									  Anum_pg_aggregate_agginitcollect,
+									  &pertrans->initCollectValueIsNull);
+		if (pertrans->initCollectValueIsNull)
+			pertrans->initCollectValue = (Datum) 0;
+		else
+			pertrans->initCollectValue = GetAggInitVal(textInitVal,
+													   aggcollecttype);
+		/*
+		 * If the collectfn is strict and the initval is NULL, make sure
+		 * transtype and collecttype are the same (or at least
+		 * binary-compatible), so that it's OK to use the transition value
+		 * as the initial collectValue. This should have been checked at agg
+		 * definition time, but just in case...
+		 */
+		if (pertrans->collectfn.fn_strict && pertrans->initValueIsNull)
+		{
+			if (!IsBinaryCoercible(aggtranstype, aggcollecttype))
+				ereport(ERROR,
+						(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+						 errmsg("aggregate %u needs to have compatible transition type and collection type",
+								aggref->aggfnoid)));
+		}
+ }
+#endif /* PGXC */
+
/*
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary-compatible), so
#include "executor/executor.h"
#include "executor/nodeForeignscan.h"
#include "foreign/fdwapi.h"
+ #include "utils/memutils.h"
#include "utils/rel.h"
+#ifdef PGXC
+#include "utils/lsyscache.h"
+#include "pgxc/pgxc.h"
+#endif
+
static TupleTableSlot *ForeignNext(ForeignScanState *node);
static bool ForeignRecheck(ForeignScanState *node, TupleTableSlot *slot);
* nodeModifyTable.c
* routines to handle ModifyTable nodes.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
i = 0;
foreach(l, node->plans)
{
+
subplan = (Plan *) lfirst(l);
+ /* Initialize the usesFdwDirectModify flag */
+ resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
+ node->fdwDirectModifyPlans);
+
/*
* Verify result relation is a valid target for the current operation
*/
* aggregate function over all rows in the current row's window frame.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
Assert(waitfor);
- w = WaitLatchOrSocket(MyLatch,
- WL_LATCH_SET | WL_POSTMASTER_DEATH | waitfor,
- port->sock, 0);
- ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
++ ModifyWaitEvent(FeBeWaitSet, 0, waitfor | WL_POSTMASTER_DEATH, NULL);
+
+ WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
+
+ /*
+ * If the postmaster has died, it's not safe to continue running,
+ * because it is the postmaster's job to kill us if some other backend
+	 * exits uncleanly. Moreover, we won't run very well in this state;
+ * helper processes like walwriter and the bgwriter will exit, so
+ * performance may be poor. Finally, if we don't exit, pg_ctl will be
+ * unable to restart the postmaster without manual intervention, so no
+ * new connections can be accepted. Exiting clears the deck for a
+ * postmaster restart.
+ *
+ * (Note that we only make this check when we would otherwise sleep on
+ * our latch. We might still continue running for a while if the
+ * postmaster is killed in mid-query, or even through multiple queries
+ * if we never have to wait for read. We don't want to burn too many
+ * cycles checking for this very rare condition, and this should cause
+ * us to exit quickly in most cases.)
+ */
+ if (event.events & WL_POSTMASTER_DEATH)
+ ereport(FATAL,
+ (errcode(ERRCODE_ADMIN_SHUTDOWN),
+ errmsg("terminating connection due to unexpected postmaster exit")));
+
/* Handle interrupt. */
- if (w & WL_LATCH_SET)
+ if (event.events & WL_LATCH_SET)
{
ResetLatch(MyLatch);
ProcessClientReadInterrupt(true);
Assert(waitfor);
- w = WaitLatchOrSocket(MyLatch,
- WL_LATCH_SET | WL_POSTMASTER_DEATH | waitfor,
- port->sock, 0);
- ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
++ ModifyWaitEvent(FeBeWaitSet, 0, waitfor | WL_POSTMASTER_DEATH, NULL);
+
+ WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
+
+ /* See comments in secure_read. */
+ if (event.events & WL_POSTMASTER_DEATH)
+ ereport(FATAL,
+ (errcode(ERRCODE_ADMIN_SHUTDOWN),
+ errmsg("terminating connection due to unexpected postmaster exit")));
+
/* Handle interrupt. */
- if (w & WL_LATCH_SET)
+ if (event.events & WL_LATCH_SET)
{
ResetLatch(MyLatch);
ProcessClientWriteInterrupt(true);
* be handled easily in a simple depth-first traversal.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* IDENTIFICATION
* src/backend/nodes/copyfuncs.c
#include "postgres.h"
#include "miscadmin.h"
+ #include "nodes/extensible.h"
#include "nodes/plannodes.h"
#include "nodes/relation.h"
+#ifdef PGXC
+#include "pgxc/locator.h"
+#include "pgxc/planner.h"
+#endif
+#ifdef XCP
+#include "pgxc/execRemote.h"
+#endif
#include "utils/datum.h"
+ #include "utils/rel.h"
COPY_NODE_FIELD(relationOids);
COPY_NODE_FIELD(invalItems);
COPY_SCALAR_FIELD(nParamExec);
- COPY_SCALAR_FIELD(hasRowSecurity);
+#ifdef XCP
+ COPY_SCALAR_FIELD(nParamRemote);
+ COPY_POINTER_FIELD(remoteparams,
+ newnode->nParamRemote * sizeof(RemoteParam));
+ COPY_STRING_FIELD(pname);
+ COPY_SCALAR_FIELD(distributionType);
+ COPY_SCALAR_FIELD(distributionKey);
+ COPY_NODE_FIELD(distributionNodes);
+ COPY_NODE_FIELD(distributionRestrict);
+#endif
return newnode;
}
CopyPlanFields((const Plan *) from, (Plan *) newnode);
COPY_SCALAR_FIELD(aggstrategy);
+#ifdef XCP
+ COPY_SCALAR_FIELD(aggdistribution);
+#endif
+ COPY_SCALAR_FIELD(aggsplit);
COPY_SCALAR_FIELD(numCols);
if (from->numCols > 0)
{
return newnode;
}
+#ifdef PGXC
+/* ****************************************************************
+ * barrier.h copy functions
+ * ****************************************************************
+ */
+static BarrierStmt *
+_copyBarrierStmt(const BarrierStmt *from)
+{
+ BarrierStmt *newnode = makeNode(BarrierStmt);
+
+ COPY_STRING_FIELD(id);
+
+ return newnode;
+}
+
+static PauseClusterStmt *
+_copyPauseClusterStmt(const PauseClusterStmt *from)
+{
+ PauseClusterStmt *newnode = makeNode(PauseClusterStmt);
+
+ COPY_SCALAR_FIELD(pause);
+
+ return newnode;
+}
+
+/* ****************************************************************
+ * nodemgr.h copy functions
+ * ****************************************************************
+ */
+static AlterNodeStmt *
+_copyAlterNodeStmt(const AlterNodeStmt *from)
+{
+ AlterNodeStmt *newnode = makeNode(AlterNodeStmt);
+
+ COPY_STRING_FIELD(node_name);
+ COPY_NODE_FIELD(options);
+
+ return newnode;
+}
+
+static CreateNodeStmt *
+_copyCreateNodeStmt(const CreateNodeStmt *from)
+{
+ CreateNodeStmt *newnode = makeNode(CreateNodeStmt);
+
+ COPY_STRING_FIELD(node_name);
+ COPY_NODE_FIELD(options);
+
+ return newnode;
+}
+
+static DropNodeStmt *
+_copyDropNodeStmt(const DropNodeStmt *from)
+{
+ DropNodeStmt *newnode = makeNode(DropNodeStmt);
+
+ COPY_STRING_FIELD(node_name);
+
+ return newnode;
+}
+
+/* ****************************************************************
+ * groupmgr.h copy functions
+ * ****************************************************************
+ */
+static CreateGroupStmt *
+_copyCreateGroupStmt(const CreateGroupStmt *from)
+{
+ CreateGroupStmt *newnode = makeNode(CreateGroupStmt);
+
+ COPY_STRING_FIELD(group_name);
+ COPY_NODE_FIELD(nodes);
+
+ return newnode;
+}
+
+static DropGroupStmt *
+_copyDropGroupStmt(const DropGroupStmt *from)
+{
+ DropGroupStmt *newnode = makeNode(DropGroupStmt);
+
+ COPY_STRING_FIELD(group_name);
+
+	return newnode;
+}
+
+ static ForeignKeyCacheInfo *
+ _copyForeignKeyCacheInfo(const ForeignKeyCacheInfo *from)
+ {
+ ForeignKeyCacheInfo *newnode = makeNode(ForeignKeyCacheInfo);
+
+ COPY_SCALAR_FIELD(conrelid);
+ COPY_SCALAR_FIELD(confrelid);
+ COPY_SCALAR_FIELD(nkeys);
+ /* COPY_SCALAR_FIELD might work for these, but let's not assume that */
+ memcpy(newnode->conkey, from->conkey, sizeof(newnode->conkey));
+ memcpy(newnode->confkey, from->confkey, sizeof(newnode->confkey));
+ memcpy(newnode->conpfeqop, from->conpfeqop, sizeof(newnode->conpfeqop));
return newnode;
}
* "x" to be considered equal() to another reference to "x" in the query.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* IDENTIFICATION
* src/backend/nodes/equalfuncs.c
_equalAggref(const Aggref *a, const Aggref *b)
{
COMPARE_SCALAR_FIELD(aggfnoid);
- COMPARE_SCALAR_FIELD(aggtype);
COMPARE_SCALAR_FIELD(aggcollid);
COMPARE_SCALAR_FIELD(inputcollid);
+ /* ignore aggtranstype since it might not be set yet */
+ COMPARE_NODE_FIELD(aggargtypes);
COMPARE_NODE_FIELD(aggdirectargs);
COMPARE_NODE_FIELD(args);
COMPARE_NODE_FIELD(aggorder);
* outfuncs.c
* Output functions for Postgres tree nodes.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include <ctype.h>
#include "lib/stringinfo.h"
+ #include "nodes/extensible.h"
#include "nodes/plannodes.h"
#include "nodes/relation.h"
+#ifdef XCP
+#include "fmgr.h"
+#include "miscadmin.h"
+#include "catalog/namespace.h"
+#include "pgxc/execRemote.h"
+#include "utils/lsyscache.h"
+#endif
#include "utils/datum.h"
+#ifdef PGXC
+#include "pgxc/planner.h"
+#endif
+
+#ifdef XCP
+/*
+ * When we send query plans between nodes we need to send OIDs of various
+ * objects - relations, data types, functions, etc.
+ * On different nodes the OIDs of these objects may differ, so we send an
+ * identifier, chosen per object type, that allows the OID to be looked up
+ * on the target node.
+ * On the other hand we want to save space when storing rules, or in other
+ * cases when we encode and decode nodes on the same node.
+ * For now the default format is not portable, as in the original Postgres
+ * code.  Later we may want to add an extra parameter to nodeToString().
+static bool portable_output = false;
+void
+set_portable_output(bool value)
+{
+ portable_output = value;
+}
+#endif
+ #include "utils/rel.h"
/* Write a Node field */
#define WRITE_NODE_FIELD(fldname) \
- (appendStringInfo(str, " :" CppAsString(fldname) " "), \
- outNode(str, node->fldname))
+ do { \
+ appendStringInfo(str, " :" CppAsString(fldname) " "); \
- _outNode(str, node->fldname); \
++ outNode(str, node->fldname); \
+ } while (0)
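+
+/*
+ * The do { ... } while (0) wrapper above keeps this two-statement macro
+ * usable as a single statement.  A hypothetical caller such as
+ *
+ *		if (node->qual)
+ *			WRITE_NODE_FIELD(qual);
+ *		else
+ *			appendStringInfoString(str, " :qual <>");
+ *
+ * would otherwise guard only the first statement and fail to compile at
+ * the dangling else.
+ */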
/* Write a bitmapset field */
#define WRITE_BITMAPSET_FIELD(fldname) \
WRITE_NODE_FIELD(rowMarks);
WRITE_INT_FIELD(epqParam);
WRITE_ENUM_FIELD(onConflictAction, OnConflictAction);
+#ifdef XCP
+ if (portable_output)
+ WRITE_RELID_LIST_FIELD(arbiterIndexes);
+ else
+ {
+#endif
WRITE_NODE_FIELD(arbiterIndexes);
+#ifdef XCP
+ }
+#endif
WRITE_NODE_FIELD(onConflictSet);
WRITE_NODE_FIELD(onConflictWhere);
- WRITE_INT_FIELD(exclRelRTI);
+ WRITE_UINT_FIELD(exclRelRTI);
WRITE_NODE_FIELD(exclRelTlist);
}
_outPlanInfo(str, (const Plan *) node);
WRITE_ENUM_FIELD(aggstrategy, AggStrategy);
+#ifdef XCP
+ WRITE_ENUM_FIELD(aggdistribution, AggDistribution);
+#endif
+ WRITE_ENUM_FIELD(aggsplit, AggSplit);
WRITE_INT_FIELD(numCols);
appendStringInfoString(str, " :grpColIdx");
if (node->constisnull)
appendStringInfoString(str, "<>");
else
- _outDatum(str, node->constvalue, node->constlen, node->constbyval);
+#ifdef XCP
+ if (portable_output)
+ _printDatum(str, node->constvalue, node->consttype);
+ else
+#endif
+ outDatum(str, node->constvalue, node->constlen, node->constbyval);
}
static void
{
WRITE_NODE_TYPE("AGGREF");
+#ifdef XCP
+ if (portable_output)
+ WRITE_FUNCID_FIELD(aggfnoid);
+ else
+#endif
WRITE_OID_FIELD(aggfnoid);
+#ifdef XCP
+ if (portable_output)
+ WRITE_TYPID_FIELD(aggtype);
+ else
+#endif
WRITE_OID_FIELD(aggtype);
+#ifdef XCP
+ if (portable_output)
+ WRITE_COLLID_FIELD(aggcollid);
+ else
+#endif
WRITE_OID_FIELD(aggcollid);
+#ifdef XCP
+ if (portable_output)
+ WRITE_COLLID_FIELD(inputcollid);
+ else
+#endif
WRITE_OID_FIELD(inputcollid);
+ WRITE_OID_FIELD(aggtranstype);
+ WRITE_NODE_FIELD(aggargtypes);
WRITE_NODE_FIELD(aggdirectargs);
WRITE_NODE_FIELD(args);
WRITE_NODE_FIELD(aggorder);
case T_XmlSerialize:
_outXmlSerialize(str, obj);
break;
+#ifdef PGXC
+		case T_ExecNodes:
+			_outExecNodes(str, obj);
+			break;
+#endif
+		case T_ForeignKeyCacheInfo:
+			_outForeignKeyCacheInfo(str, obj);
+			break;
default:
* readfuncs.c
* Reader functions for Postgres tree nodes.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
#include <math.h>
+ #include "fmgr.h"
+ #include "nodes/extensible.h"
#include "nodes/parsenodes.h"
+ #include "nodes/plannodes.h"
#include "nodes/readfuncs.h"
+#ifdef PGXC
+#include "access/htup.h"
+#endif
+#ifdef XCP
+#include "catalog/namespace.h"
+#include "catalog/pg_class.h"
+#include "pgxc/execRemote.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+/*
+ * When we send query plans between nodes we need to send OIDs of various
+ * objects - relations, data types, functions, etc.
+ * On different nodes the OIDs of these objects may differ, so we send an
+ * identifier, chosen per object type, that allows the OID to be looked up
+ * on the target node.
+ * On the other hand we want to save space when storing rules, or in other
+ * cases when we encode and decode nodes on the same node.
+ * For now the default format is not portable, as in the original Postgres
+ * code.  Later we may want to add an extra parameter to stringToNode().
+ */
+static bool portable_input = false;
+void
+set_portable_input(bool value)
+{
+ portable_input = value;
+}
+#endif /* XCP */
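+
+/*
+ * Matching receiver-side sketch (illustration only): the node that gets a
+ * portable plan string flips portable_input around stringToNode() so the
+ * READ_*_FIELD macros below resolve object names back to local OIDs.  The
+ * wrapper and guard macro are hypothetical; stringToNode() is the standard
+ * entry point.
+ */
+#ifdef XCP_PORTABLE_INPUT_EXAMPLE
+static Node *
+deserializePlanPortable(char *str)
+{
+	Node	   *plantree;
+
+	set_portable_input(true);
+	plantree = (Node *) stringToNode(str);
+	set_portable_input(false);
+	return plantree;
+}
+#endif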
+
/*
* Macros to simplify reading of different kinds of fields. Use these
* wherever possible to reduce the chance for silly typos. Note that these
token = pg_strtok(&length); /* get field value */ \
local_node->fldname = atoui(token)
+ /* Read a long integer field (anything written as ":fldname %ld") */
#define READ_LONG_FIELD(fldname) \
token = pg_strtok(&length); /* skip :fldname */ \
token = pg_strtok(&length); /* get field value */ \
local_node->fldname = atol(token)
/* Read an OID field (don't hard-wire assumption that OID is same as uint) */
+#ifdef XCP
+#define READ_OID_FIELD(fldname) \
+	(AssertMacro(!portable_input),	/* only allow reading OIDs within a node */ \
+ token = pg_strtok(&length), /* skip :fldname */ \
+ token = pg_strtok(&length), /* get field value */ \
+ local_node->fldname = atooid(token))
+#else
#define READ_OID_FIELD(fldname) \
token = pg_strtok(&length); /* skip :fldname */ \
token = pg_strtok(&length); /* get field value */ \
(void) token; /* in case not used elsewhere */ \
local_node->fldname = _readBitmapset()
+#ifdef XCP
+/* Read fields of a Plan node */
+#define READ_PLAN_FIELDS(nodeTypeName) \
+ Plan *plan_node; \
+ READ_LOCALS(nodeTypeName); \
+ plan_node = (Plan *) local_node; \
+ token = pg_strtok(&length); /* skip :startup_cost */ \
+ token = pg_strtok(&length); /* get field value */ \
+ plan_node->startup_cost = atof(token); \
+ token = pg_strtok(&length); /* skip :total_cost */ \
+ token = pg_strtok(&length); /* get field value */ \
+ plan_node->total_cost = atof(token); \
+ token = pg_strtok(&length); /* skip :plan_rows */ \
+ token = pg_strtok(&length); /* get field value */ \
+ plan_node->plan_rows = atof(token); \
+ token = pg_strtok(&length); /* skip :plan_width */ \
+ token = pg_strtok(&length); /* get field value */ \
+ plan_node->plan_width = atoi(token); \
+ token = pg_strtok(&length); /* skip :targetlist */ \
+ plan_node->targetlist = nodeRead(NULL, 0); \
+ token = pg_strtok(&length); /* skip :qual */ \
+ plan_node->qual = nodeRead(NULL, 0); \
+ token = pg_strtok(&length); /* skip :lefttree */ \
+ plan_node->lefttree = nodeRead(NULL, 0); \
+ token = pg_strtok(&length); /* skip :righttree */ \
+ plan_node->righttree = nodeRead(NULL, 0); \
+ token = pg_strtok(&length); /* skip :initPlan */ \
+ plan_node->initPlan = nodeRead(NULL, 0); \
+ token = pg_strtok(&length); /* skip :extParam */ \
+ plan_node->extParam = _readBitmapset(); \
+ token = pg_strtok(&length); /* skip :allParam */ \
+ plan_node->allParam = _readBitmapset()
+
+/* Read fields of a Scan node */
+#define READ_SCAN_FIELDS(nodeTypeName) \
+ Scan *scan_node; \
+ READ_PLAN_FIELDS(nodeTypeName); \
+ scan_node = (Scan *) local_node; \
+ token = pg_strtok(&length); /* skip :scanrelid */ \
+ token = pg_strtok(&length); /* get field value */ \
+ scan_node->scanrelid = atoi(token)
+
+/* Read fields of a Join node */
+#define READ_JOIN_FIELDS(nodeTypeName) \
+ Join *join_node; \
+ READ_PLAN_FIELDS(nodeTypeName); \
+ join_node = (Join *) local_node; \
+ token = pg_strtok(&length); /* skip :jointype */ \
+ token = pg_strtok(&length); /* get field value */ \
+ join_node->jointype = (JoinType) atoi(token); \
+ token = pg_strtok(&length); /* skip :joinqual */ \
+ join_node->joinqual = nodeRead(NULL, 0)
+
+/*
+ * Macros to read an identifier and lookup the OID
+ * The identifier depends on object type.
+ */
+#define NSP_OID(nspname) LookupNamespaceNoError(nspname)
+
+/* Read relation identifier and lookup the OID */
+#define READ_RELID_INTERNAL(relid, warn) \
+ do { \
+ char *nspname; /* namespace name */ \
+ char *relname; /* relation name */ \
+ token = pg_strtok(&length); /* get nspname */ \
+ nspname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* get relname */ \
+ relname = nullable_string(token, length); \
+ if (relname) \
+ { \
+ relid = get_relname_relid(relname, \
+ NSP_OID(nspname)); \
+ if (!OidIsValid((relid)) && (warn)) \
+ elog(WARNING, "could not find OID for relation %s.%s", nspname,\
+ relname); \
+ } \
+ else \
+ relid = InvalidOid; \
+ } while (0)
+
+#define READ_RELID_FIELD_NOWARN(fldname) \
+ do { \
+ Oid relid; \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ READ_RELID_INTERNAL(relid, false); \
+ local_node->fldname = relid; \
+ } while (0)
+
+#define READ_RELID_FIELD(fldname) \
+ do { \
+ Oid relid; \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ READ_RELID_INTERNAL(relid, true); \
+ local_node->fldname = relid; \
+ } while (0)
+
+#define READ_RELID_LIST_FIELD(fldname) \
+ do { \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ token = pg_strtok(&length); /* skip '(' */ \
+ if (length > 0 ) \
+ { \
+ Assert(token[0] == '('); \
+ for (;;) \
+ { \
+ Oid relid; \
+ READ_RELID_INTERNAL(relid, true); \
+ local_node->fldname = lappend_oid(local_node->fldname, relid); \
+ token = pg_strtok(&length); \
+ if (token[0] == ')') \
+ break; \
+ } \
+ } \
+ else \
+ local_node->fldname = NIL; \
+ } while (0)
+
+/* Read data type identifier and lookup the OID */
+#define READ_TYPID_FIELD(fldname) \
+ do { \
+ char *nspname; /* namespace name */ \
+ char *typname; /* data type name */ \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ token = pg_strtok(&length); /* get nspname */ \
+ nspname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* get typname */ \
+ typname = nullable_string(token, length); \
+ if (typname) \
+ local_node->fldname = get_typname_typid(typname, \
+ NSP_OID(nspname)); \
+ else \
+ local_node->fldname = InvalidOid; \
+ } while (0)
+
+/* Read function identifier and lookup the OID */
+#define READ_FUNCID_FIELD(fldname) \
+ do { \
+ char *nspname; /* namespace name */ \
+ char *funcname; /* function name */ \
+ int nargs; /* number of arguments */ \
+ Oid *argtypes; /* argument types */ \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ token = pg_strtok(&length); /* get nspname */ \
+ nspname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* get funcname */ \
+ funcname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* get nargs */ \
+ nargs = atoi(token); \
+ if (funcname) \
+ { \
+ int i; \
+ argtypes = palloc(nargs * sizeof(Oid)); \
+ for (i = 0; i < nargs; i++) \
+ { \
+ char *typnspname; /* argument type namespace */ \
+ char *typname; /* argument type name */ \
+ token = pg_strtok(&length); /* get type nspname */ \
+ typnspname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* get type name */ \
+ typname = nullable_string(token, length); \
+ argtypes[i] = get_typname_typid(typname, \
+ NSP_OID(typnspname)); \
+ } \
+ local_node->fldname = get_funcid(funcname, \
+ buildoidvector(argtypes, nargs), \
+ NSP_OID(nspname)); \
+ } \
+ else \
+ local_node->fldname = InvalidOid; \
+ } while (0)
+
+/* Read operator identifier and lookup the OID */
+#define READ_OPERID_FIELD(fldname) \
+ do { \
+ char *nspname; /* namespace name */ \
+ char *oprname; /* operator name */ \
+ char *leftnspname; /* left type namespace */ \
+ char *leftname; /* left type name */ \
+ Oid oprleft; /* left type */ \
+ char *rightnspname; /* right type namespace */ \
+ char *rightname; /* right type name */ \
+ Oid oprright; /* right type */ \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ token = pg_strtok(&length); /* get nspname */ \
+ nspname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* get operator name */ \
+ oprname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* left type namespace */ \
+ leftnspname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* left type name */ \
+ leftname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* right type namespace */ \
+ rightnspname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* right type name */ \
+ rightname = nullable_string(token, length); \
+ if (oprname) \
+ { \
+ if (leftname) \
+ oprleft = get_typname_typid(leftname, \
+ NSP_OID(leftnspname)); \
+ else \
+ oprleft = InvalidOid; \
+ if (rightname) \
+ oprright = get_typname_typid(rightname, \
+ NSP_OID(rightnspname)); \
+ else \
+ oprright = InvalidOid; \
+ local_node->fldname = get_operid(oprname, \
+ oprleft, \
+ oprright, \
+ NSP_OID(nspname)); \
+ } \
+ else \
+ local_node->fldname = InvalidOid; \
+ } while (0)
+
+/* Read collation identifier and lookup the OID */
+#define READ_COLLID_FIELD(fldname) \
+ do { \
+ char *nspname; /* namespace name */ \
+ char *collname; /* collation name */ \
+ int collencoding; /* collation encoding */ \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ token = pg_strtok(&length); /* get nspname */ \
+ nspname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* get collname */ \
+ collname = nullable_string(token, length); \
+ token = pg_strtok(&length); /* get collencoding */ \
+ collencoding = atoi(token); \
+ if (collname) \
+ local_node->fldname = get_collid(collname, \
+ collencoding, \
+ NSP_OID(nspname)); \
+ else \
+ local_node->fldname = InvalidOid; \
+ } while (0)
+#endif
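+
+/*
+ * Wire-format illustration (an assumption for exposition; the matching
+ * WRITE_FUNCID_FIELD definition is not shown in this hunk): a portable
+ * function reference travels as namespace, name, argument count and
+ * argument type names, e.g.
+ *
+ *		:aggfnoid pg_catalog avg 1 pg_catalog int4
+ *
+ * READ_FUNCID_FIELD parses exactly these tokens and resolves them with
+ * get_funcid(), so raw OIDs never cross node boundaries.
+ */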
+ /* Read an attribute number array */
+ #define READ_ATTRNUMBER_ARRAY(fldname, len) \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ local_node->fldname = readAttrNumberCols(len);
+
+ /* Read an oid array */
+ #define READ_OID_ARRAY(fldname, len) \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ local_node->fldname = readOidCols(len);
+
+ /* Read an int array */
+ #define READ_INT_ARRAY(fldname, len) \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ local_node->fldname = readIntCols(len);
+
+ /* Read a bool array */
+ #define READ_BOOL_ARRAY(fldname, len) \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ local_node->fldname = readBoolCols(len);
/* Routine exit */
#define READ_DONE() \
((length) == 0 ? NULL : debackslash(token, length))
+static Datum readDatum(bool typbyval);
+#ifdef XCP
+static Datum scanDatum(Oid typid, int typmod);
+#endif
+
/*
* _readBitmapset
*/
{
READ_LOCALS(Aggref);
+#ifdef XCP
+ if (portable_input)
+ READ_FUNCID_FIELD(aggfnoid);
+ else
+#endif
READ_OID_FIELD(aggfnoid);
+#ifdef XCP
+ if (portable_input)
+ READ_TYPID_FIELD(aggtype);
+ else
+#endif
READ_OID_FIELD(aggtype);
+#ifdef XCP
+ if (portable_input)
+ READ_COLLID_FIELD(aggcollid);
+ else
+#endif
READ_OID_FIELD(aggcollid);
+#ifdef XCP
+ if (portable_input)
+ READ_COLLID_FIELD(inputcollid);
+ else
+#endif
READ_OID_FIELD(inputcollid);
+ READ_OID_FIELD(aggtranstype);
+ READ_NODE_FIELD(aggargtypes);
READ_NODE_FIELD(aggdirectargs);
READ_NODE_FIELD(args);
READ_NODE_FIELD(aggorder);
{
READ_LOCALS(OpExpr);
+#ifdef XCP
+ if (portable_input)
+ READ_OPERID_FIELD(opno);
+ else
+#endif
READ_OID_FIELD(opno);
+#ifdef XCP
+ if (portable_input)
+ READ_FUNCID_FIELD(opfuncid);
+ else
+#endif
READ_OID_FIELD(opfuncid);
+
+#ifdef XCP
+ if (portable_input)
+ READ_TYPID_FIELD(opresulttype);
+ else
+#endif
READ_OID_FIELD(opresulttype);
READ_BOOL_FIELD(opretset);
+#ifdef XCP
+ if (portable_input)
+ READ_COLLID_FIELD(opcollid);
+ else
+#endif
READ_OID_FIELD(opcollid);
+#ifdef XCP
+ if (portable_input)
+ READ_COLLID_FIELD(inputcollid);
+ else
+#endif
READ_OID_FIELD(inputcollid);
READ_NODE_FIELD(args);
READ_LOCATION_FIELD(location);
{
READ_LOCALS(DistinctExpr);
+#ifdef XCP
+ if (portable_input)
+ READ_OPERID_FIELD(opno);
+ else
+#endif
READ_OID_FIELD(opno);
+#ifdef XCP
+ if (portable_input)
+ READ_FUNCID_FIELD(opfuncid);
+ else
+#endif
READ_OID_FIELD(opfuncid);
+
+#ifdef XCP
+ if (portable_input)
+ READ_TYPID_FIELD(opresulttype);
+ else
+#endif
READ_OID_FIELD(opresulttype);
READ_BOOL_FIELD(opretset);
+#ifdef XCP
+ if (portable_input)
+ READ_COLLID_FIELD(opcollid);
+ else
+#endif
READ_OID_FIELD(opcollid);
+#ifdef XCP
+ if (portable_input)
+ READ_COLLID_FIELD(inputcollid);
+ else
+#endif
READ_OID_FIELD(inputcollid);
READ_NODE_FIELD(args);
READ_LOCATION_FIELD(location);
{
READ_LOCALS(NullIfExpr);
+#ifdef XCP
+ if (portable_input)
+ READ_OPERID_FIELD(opno);
+ else
+#endif
READ_OID_FIELD(opno);
+#ifdef XCP
+ if (portable_input)
+ READ_FUNCID_FIELD(opfuncid);
+ else
+#endif
READ_OID_FIELD(opfuncid);
+
+ /*
+ * The opfuncid is stored in the textual format primarily for debugging
+ * and documentation reasons. We want to always read it as zero to force
+ * it to be re-looked-up in the pg_operator entry. This ensures that
+ * stored rules don't have hidden dependencies on operators' functions.
+ * (We don't currently support an ALTER OPERATOR command, but might
+ * someday.)
+ */
+#ifdef XCP
+ /* Do not invalidate if we have just looked up the value */
+ if (!portable_input)
+#endif
+ local_node->opfuncid = InvalidOid;
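+
+	/*
+	 * Illustration (not part of the patch): after this reset, a stored
+	 * rule read back on the same node carries opfuncid = InvalidOid, and
+	 * the fix_opfuncids() pass re-derives the function OID from
+	 * pg_operator before execution.
+	 */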
+
+#ifdef XCP
+ if (portable_input)
+ READ_TYPID_FIELD(opresulttype);
+ else
+#endif
READ_OID_FIELD(opresulttype);
READ_BOOL_FIELD(opretset);
+#ifdef XCP
+ if (portable_input)
+ READ_COLLID_FIELD(opcollid);
+ else
+#endif
READ_OID_FIELD(opcollid);
+#ifdef XCP
+ if (portable_input)
+ READ_COLLID_FIELD(inputcollid);
+ else
+#endif
READ_OID_FIELD(inputcollid);
READ_NODE_FIELD(args);
READ_LOCATION_FIELD(location);
{
READ_LOCALS(TableSampleClause);
+#ifdef XCP
+ if (portable_input)
+ {
+ READ_FUNCID_FIELD(tsmhandler);
+ }
+ else
+ {
+#endif
+ READ_OID_FIELD(tsmhandler);
+#ifdef XCP
+ }
+#endif
READ_NODE_FIELD(args);
READ_NODE_FIELD(repeatable);
READ_DONE();
}
-/*
- * _readDefElem
- */
-static DefElem *
-_readDefElem(void)
-{
- READ_LOCALS(DefElem);
-
- READ_STRING_FIELD(defnamespace);
- READ_STRING_FIELD(defname);
- READ_NODE_FIELD(arg);
- READ_ENUM_FIELD(defaction, DefElemAction);
-
- READ_DONE();
-}
-
-/*
- * _readPlannedStmt
- */
-static PlannedStmt *
-_readPlannedStmt(void)
-{
- READ_LOCALS(PlannedStmt);
-
- READ_ENUM_FIELD(commandType, CmdType);
- READ_UINT_FIELD(queryId);
- READ_BOOL_FIELD(hasReturning);
- READ_BOOL_FIELD(hasModifyingCTE);
- READ_BOOL_FIELD(canSetTag);
- READ_BOOL_FIELD(transientPlan);
- READ_BOOL_FIELD(dependsOnRole);
- READ_BOOL_FIELD(parallelModeNeeded);
- READ_NODE_FIELD(planTree);
- READ_NODE_FIELD(rtable);
- READ_NODE_FIELD(resultRelations);
- READ_NODE_FIELD(utilityStmt);
- READ_NODE_FIELD(subplans);
- READ_BITMAPSET_FIELD(rewindPlanIDs);
- READ_NODE_FIELD(rowMarks);
- READ_NODE_FIELD(relationOids);
- READ_NODE_FIELD(invalItems);
- READ_INT_FIELD(nParamExec);
-
- READ_DONE();
-}
-
-/*
- * ReadCommonPlan
- * Assign the basic stuff of all nodes that inherit from Plan
- */
-static void
-ReadCommonPlan(Plan *local_node)
-{
- READ_TEMP_LOCALS();
-
- READ_FLOAT_FIELD(startup_cost);
- READ_FLOAT_FIELD(total_cost);
- READ_FLOAT_FIELD(plan_rows);
- READ_INT_FIELD(plan_width);
- READ_BOOL_FIELD(parallel_aware);
- READ_INT_FIELD(plan_node_id);
- READ_NODE_FIELD(targetlist);
- READ_NODE_FIELD(qual);
- READ_NODE_FIELD(lefttree);
- READ_NODE_FIELD(righttree);
- READ_NODE_FIELD(initPlan);
- READ_BITMAPSET_FIELD(extParam);
- READ_BITMAPSET_FIELD(allParam);
-}
+#ifdef XCP
/*
* _readPlan
*/
static Plan *
READ_INT_FIELD(wtParam);
READ_INT_FIELD(numCols);
- READ_ATTRNUMBER_ARRAY(dupColIdx, local_node->numCols);
- READ_OID_ARRAY(dupOperators, local_node->numCols);
- READ_LONG_FIELD(numGroups);
+
+ token = pg_strtok(&length); /* skip :dupColIdx */
+ local_node->dupColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->dupColIdx[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :dupOperators */
+ local_node->dupOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->dupOperators[i] = atooid(token);
+ }
+
+ READ_LONG_FIELD(numGroups);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readBitmapAnd
+ */
+static BitmapAnd *
+_readBitmapAnd(void)
+{
+ READ_PLAN_FIELDS(BitmapAnd);
+
+ READ_NODE_FIELD(bitmapplans);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readBitmapOr
+ */
+static BitmapOr *
+_readBitmapOr(void)
+{
+ READ_PLAN_FIELDS(BitmapOr);
+
+ READ_NODE_FIELD(bitmapplans);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readScan
+ */
+static Scan *
+_readScan(void)
+{
+ READ_SCAN_FIELDS(Scan);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readSeqScan
+ */
+static SeqScan *
+_readSeqScan(void)
+{
+ READ_SCAN_FIELDS(SeqScan);
+
+ READ_DONE();
+}
+
+/*
+ * _readSampleScan
+ */
+static SampleScan *
+_readSampleScan(void)
+{
+ READ_SCAN_FIELDS(SampleScan);
+ READ_NODE_FIELD(tablesample);
+
+ READ_DONE();
+}
+
+/*
+ * _readIndexScan
+ */
+static IndexScan *
+_readIndexScan(void)
+{
+ READ_SCAN_FIELDS(IndexScan);
+
+ if (portable_input)
+ READ_RELID_FIELD(indexid);
+ else
+ READ_OID_FIELD(indexid);
+ READ_NODE_FIELD(indexqual);
+ READ_NODE_FIELD(indexqualorig);
+ READ_NODE_FIELD(indexorderby);
+ READ_NODE_FIELD(indexorderbyorig);
+ READ_NODE_FIELD(indexorderbyops);
+ READ_ENUM_FIELD(indexorderdir, ScanDirection);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readIndexOnlyScan
+ */
+static IndexOnlyScan *
+_readIndexOnlyScan(void)
+{
+ READ_SCAN_FIELDS(IndexOnlyScan);
+
+ if (portable_input)
+ READ_RELID_FIELD(indexid);
+ else
+ READ_OID_FIELD(indexid);
+ READ_NODE_FIELD(indexqual);
+ READ_NODE_FIELD(indexorderby);
+ READ_NODE_FIELD(indextlist);
+ READ_ENUM_FIELD(indexorderdir, ScanDirection);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readBitmapIndexScan
+ */
+static BitmapIndexScan *
+_readBitmapIndexScan(void)
+{
+ READ_SCAN_FIELDS(BitmapIndexScan);
+
+ if (portable_input)
+ READ_RELID_FIELD(indexid);
+ else
+ READ_OID_FIELD(indexid);
+ READ_NODE_FIELD(indexqual);
+ READ_NODE_FIELD(indexqualorig);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readBitmapHeapScan
+ */
+static BitmapHeapScan *
+_readBitmapHeapScan(void)
+{
+ READ_SCAN_FIELDS(BitmapHeapScan);
+
+ READ_NODE_FIELD(bitmapqualorig);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readTidScan
+ */
+static TidScan *
+_readTidScan(void)
+{
+ READ_SCAN_FIELDS(TidScan);
+
+ READ_NODE_FIELD(tidquals);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readSubqueryScan
+ */
+static SubqueryScan *
+_readSubqueryScan(void)
+{
+ READ_SCAN_FIELDS(SubqueryScan);
+
+ READ_NODE_FIELD(subplan);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readFunctionScan
+ */
+static FunctionScan *
+_readFunctionScan(void)
+{
+ READ_SCAN_FIELDS(FunctionScan);
+
+ READ_NODE_FIELD(functions);
+ READ_BOOL_FIELD(funcordinality);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readValuesScan
+ */
+static ValuesScan *
+_readValuesScan(void)
+{
+ READ_SCAN_FIELDS(ValuesScan);
+
+ READ_NODE_FIELD(values_lists);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readCteScan
+ */
+static CteScan *
+_readCteScan(void)
+{
+ READ_SCAN_FIELDS(CteScan);
+
+ READ_INT_FIELD(ctePlanId);
+ READ_INT_FIELD(cteParam);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readWorkTableScan
+ */
+static WorkTableScan *
+_readWorkTableScan(void)
+{
+ READ_SCAN_FIELDS(WorkTableScan);
+
+ READ_INT_FIELD(wtParam);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readJoin
+ */
+static Join *
+_readJoin(void)
+{
+ READ_JOIN_FIELDS(Join);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readNestLoop
+ */
+static NestLoop *
+_readNestLoop(void)
+{
+ READ_JOIN_FIELDS(NestLoop);
+
+ READ_NODE_FIELD(nestParams);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readMergeJoin
+ */
+static MergeJoin *
+_readMergeJoin(void)
+{
+ int numCols;
+ int i;
+ READ_JOIN_FIELDS(MergeJoin);
+
+ READ_NODE_FIELD(mergeclauses);
+ numCols = list_length(local_node->mergeclauses);
+
+
+ token = pg_strtok(&length); /* skip :mergeFamilies */
+ local_node->mergeFamilies = (Oid *) palloc(numCols * sizeof(Oid));
+ for (i = 0; i < numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->mergeFamilies[i] = atooid(token);
+ }
+
+ token = pg_strtok(&length); /* skip :mergeCollations */
+ local_node->mergeCollations = (Oid *) palloc(numCols * sizeof(Oid));
+ for (i = 0; i < numCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *collname; /* collation name */
+ int collencoding; /* collation encoding */
+ /* the token is already read */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get collname */
+ collname = nullable_string(token, length);
+			token = pg_strtok(&length); /* get collencoding */
+ collencoding = atoi(token);
+ if (collname)
+ local_node->mergeCollations[i] = get_collid(collname,
+ collencoding,
+ NSP_OID(nspname));
+ else
+ local_node->mergeCollations[i] = InvalidOid;
+ }
+ else
+ local_node->mergeCollations[i] = atooid(token);
+ }
+
+ token = pg_strtok(&length); /* skip :mergeStrategies */
+ local_node->mergeStrategies = (int *) palloc(numCols * sizeof(int));
+ for (i = 0; i < numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->mergeStrategies[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :mergeNullsFirst */
+ local_node->mergeNullsFirst = (bool *) palloc(numCols * sizeof(bool));
+ for (i = 0; i < numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->mergeNullsFirst[i] = strtobool(token);
+ }
+
+ READ_DONE();
+}
+
+
+/*
+ * _readHashJoin
+ */
+static HashJoin *
+_readHashJoin(void)
+{
+ READ_JOIN_FIELDS(HashJoin);
+
+ READ_NODE_FIELD(hashclauses);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readMaterial
+ */
+static Material *
+_readMaterial(void)
+{
+ READ_PLAN_FIELDS(Material);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readSort
+ */
+static Sort *
+_readSort(void)
+{
+ int i;
+ READ_PLAN_FIELDS(Sort);
+
+ READ_INT_FIELD(numCols);
+
+ token = pg_strtok(&length); /* skip :sortColIdx */
+ local_node->sortColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->sortColIdx[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :sortOperators */
+ local_node->sortOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *oprname; /* operator name */
+ char *leftnspname; /* left type namespace */
+ char *leftname; /* left type name */
+ Oid oprleft; /* left type */
+ char *rightnspname; /* right type namespace */
+ char *rightname; /* right type name */
+ Oid oprright; /* right type */
+ /* token is already set to nspname */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get operator name */
+ oprname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type namespace */
+ leftnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type name */
+ leftname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type namespace */
+ rightnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type name */
+ rightname = nullable_string(token, length);
+ if (leftname)
+ oprleft = get_typname_typid(leftname,
+ NSP_OID(leftnspname));
+ else
+ oprleft = InvalidOid;
+ if (rightname)
+ oprright = get_typname_typid(rightname,
+ NSP_OID(rightnspname));
+ else
+ oprright = InvalidOid;
+ local_node->sortOperators[i] = get_operid(oprname,
+ oprleft,
+ oprright,
+ NSP_OID(nspname));
+ }
+ else
+ local_node->sortOperators[i] = atooid(token);
+ }
+
+ token = pg_strtok(&length); /* skip :collations */
+ local_node->collations = (Oid *) palloc(local_node->numCols * sizeof(Oid));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *collname; /* collation name */
+ int collencoding; /* collation encoding */
+ /* the token is already read */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get collname */
+ collname = nullable_string(token, length);
+			token = pg_strtok(&length); /* get collencoding */
+ collencoding = atoi(token);
+ if (collname)
+ local_node->collations[i] = get_collid(collname,
+ collencoding,
+ NSP_OID(nspname));
+ else
+ local_node->collations[i] = InvalidOid;
+ }
+ else
+ local_node->collations[i] = atooid(token);
+ }
+
+ token = pg_strtok(&length); /* skip :nullsFirst */
+ local_node->nullsFirst = (bool *) palloc(local_node->numCols * sizeof(bool));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->nullsFirst[i] = strtobool(token);
+ }
+
+ READ_DONE();
+}
+
+
+/*
+ * _readGroup
+ */
+static Group *
+_readGroup(void)
+{
+ int i;
+ READ_PLAN_FIELDS(Group);
+
+ READ_INT_FIELD(numCols);
+
+ token = pg_strtok(&length); /* skip :grpColIdx */
+ local_node->grpColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->grpColIdx[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :grpOperators */
+ local_node->grpOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *oprname; /* operator name */
+ char *leftnspname; /* left type namespace */
+ char *leftname; /* left type name */
+ Oid oprleft; /* left type */
+ char *rightnspname; /* right type namespace */
+ char *rightname; /* right type name */
+ Oid oprright; /* right type */
+ /* token is already set to nspname */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get operator name */
+ oprname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type namespace */
+ leftnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type name */
+ leftname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type namespace */
+ rightnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type name */
+ rightname = nullable_string(token, length);
+ if (leftname)
+ oprleft = get_typname_typid(leftname,
+ NSP_OID(leftnspname));
+ else
+ oprleft = InvalidOid;
+ if (rightname)
+ oprright = get_typname_typid(rightname,
+ NSP_OID(rightnspname));
+ else
+ oprright = InvalidOid;
+ local_node->grpOperators[i] = get_operid(oprname,
+ oprleft,
+ oprright,
+ NSP_OID(nspname));
+ }
+ else
+ local_node->grpOperators[i] = atooid(token);
+ }
+
+ READ_DONE();
+}
+
+
+/*
+ * _readAgg
+ */
+static Agg *
+_readAgg(void)
+{
+ int i;
+ READ_PLAN_FIELDS(Agg);
+
+	READ_ENUM_FIELD(aggstrategy, AggStrategy);
+	READ_ENUM_FIELD(aggdistribution, AggDistribution);
+	READ_ENUM_FIELD(aggsplit, AggSplit);
+	READ_INT_FIELD(numCols);
+
+ token = pg_strtok(&length); /* skip :grpColIdx */
+ local_node->grpColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->grpColIdx[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :grpOperators */
+ local_node->grpOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *oprname; /* operator name */
+ char *leftnspname; /* left type namespace */
+ char *leftname; /* left type name */
+ Oid oprleft; /* left type */
+ char *rightnspname; /* right type namespace */
+ char *rightname; /* right type name */
+ Oid oprright; /* right type */
+ /* token is already set to nspname */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get operator name */
+ oprname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type namespace */
+ leftnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type name */
+ leftname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type namespace */
+ rightnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type name */
+ rightname = nullable_string(token, length);
+ if (leftname)
+ oprleft = get_typname_typid(leftname,
+ NSP_OID(leftnspname));
+ else
+ oprleft = InvalidOid;
+ if (rightname)
+ oprright = get_typname_typid(rightname,
+ NSP_OID(rightnspname));
+ else
+ oprright = InvalidOid;
+ local_node->grpOperators[i] = get_operid(oprname,
+ oprleft,
+ oprright,
+ NSP_OID(nspname));
+ }
+ else
+ local_node->grpOperators[i] = atooid(token);
+ }
+
+ READ_LONG_FIELD(numGroups);
+
+ READ_NODE_FIELD(groupingSets);
+ READ_NODE_FIELD(chain);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readWindowAgg
+ */
+static WindowAgg *
+_readWindowAgg(void)
+{
+ int i;
+ READ_PLAN_FIELDS(WindowAgg);
+
+ READ_INT_FIELD(winref);
+ READ_INT_FIELD(partNumCols);
+
+ token = pg_strtok(&length); /* skip :partColIdx */
+ local_node->partColIdx = (AttrNumber *) palloc(local_node->partNumCols * sizeof(AttrNumber));
+ for (i = 0; i < local_node->partNumCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->partColIdx[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :partOperators */
+ local_node->partOperators = (Oid *) palloc(local_node->partNumCols * sizeof(Oid));
+ for (i = 0; i < local_node->partNumCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *oprname; /* operator name */
+ char *leftnspname; /* left type namespace */
+ char *leftname; /* left type name */
+ Oid oprleft; /* left type */
+ char *rightnspname; /* right type namespace */
+ char *rightname; /* right type name */
+ Oid oprright; /* right type */
+ /* token is already set to nspname */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get operator name */
+ oprname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type namespace */
+ leftnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type name */
+ leftname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type namespace */
+ rightnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type name */
+ rightname = nullable_string(token, length);
+ if (leftname)
+ oprleft = get_typname_typid(leftname,
+ NSP_OID(leftnspname));
+ else
+ oprleft = InvalidOid;
+ if (rightname)
+ oprright = get_typname_typid(rightname,
+ NSP_OID(rightnspname));
+ else
+ oprright = InvalidOid;
+ local_node->partOperators[i] = get_operid(oprname,
+ oprleft,
+ oprright,
+ NSP_OID(nspname));
+ }
+ else
+ local_node->partOperators[i] = atooid(token);
+ }
+
+ READ_INT_FIELD(ordNumCols);
+
+ token = pg_strtok(&length); /* skip :ordColIdx */
+ local_node->ordColIdx = (AttrNumber *) palloc(local_node->ordNumCols * sizeof(AttrNumber));
+ for (i = 0; i < local_node->ordNumCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->ordColIdx[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :ordOperators */
+ local_node->ordOperators = (Oid *) palloc(local_node->ordNumCols * sizeof(Oid));
+ for (i = 0; i < local_node->ordNumCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *oprname; /* operator name */
+ char *leftnspname; /* left type namespace */
+ char *leftname; /* left type name */
+ Oid oprleft; /* left type */
+ char *rightnspname; /* right type namespace */
+ char *rightname; /* right type name */
+ Oid oprright; /* right type */
+ /* token is already set to nspname */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get operator name */
+ oprname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type namespace */
+ leftnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type name */
+ leftname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type namespace */
+ rightnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type name */
+ rightname = nullable_string(token, length);
+ if (leftname)
+ oprleft = get_typname_typid(leftname,
+ NSP_OID(leftnspname));
+ else
+ oprleft = InvalidOid;
+ if (rightname)
+ oprright = get_typname_typid(rightname,
+ NSP_OID(rightnspname));
+ else
+ oprright = InvalidOid;
+ local_node->ordOperators[i] = get_operid(oprname,
+ oprleft,
+ oprright,
+ NSP_OID(nspname));
+ }
+ else
+ local_node->ordOperators[i] = atooid(token);
+ }
+
+ READ_INT_FIELD(frameOptions);
+ READ_NODE_FIELD(startOffset);
+ READ_NODE_FIELD(endOffset);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readUnique
+ */
+static Unique *
+_readUnique(void)
+{
+ int i;
+ READ_PLAN_FIELDS(Unique);
+
+ READ_INT_FIELD(numCols);
+
+ token = pg_strtok(&length); /* skip :uniqColIdx */
+ local_node->uniqColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->uniqColIdx[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :uniqOperators */
+ local_node->uniqOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *oprname; /* operator name */
+ char *leftnspname; /* left type namespace */
+ char *leftname; /* left type name */
+ Oid oprleft; /* left type */
+ char *rightnspname; /* right type namespace */
+ char *rightname; /* right type name */
+ Oid oprright; /* right type */
+ /* token is already set to nspname */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get operator name */
+ oprname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type namespace */
+ leftnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type name */
+ leftname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type namespace */
+ rightnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type name */
+ rightname = nullable_string(token, length);
+ if (leftname)
+ oprleft = get_typname_typid(leftname,
+ NSP_OID(leftnspname));
+ else
+ oprleft = InvalidOid;
+ if (rightname)
+ oprright = get_typname_typid(rightname,
+ NSP_OID(rightnspname));
+ else
+ oprright = InvalidOid;
+ local_node->uniqOperators[i] = get_operid(oprname,
+ oprleft,
+ oprright,
+ NSP_OID(nspname));
+ }
+ else
+ local_node->uniqOperators[i] = atooid(token);
+ }
+
+ READ_DONE();
+}
+
+
+/*
+ * _readHash
+ */
+static Hash *
+_readHash(void)
+{
+ READ_PLAN_FIELDS(Hash);
+
+ if (portable_input)
+ READ_RELID_FIELD(skewTable);
+ else
+ READ_OID_FIELD(skewTable);
+ READ_INT_FIELD(skewColumn);
+ READ_BOOL_FIELD(skewInherit);
+ if (portable_input)
+ READ_TYPID_FIELD(skewColType);
+ else
+ READ_OID_FIELD(skewColType);
+ READ_INT_FIELD(skewColTypmod);
+
+ READ_DONE();
+}
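+
+/*
+ * Illustrative note (an inference from the branches above, not original
+ * source text): under portable_input, READ_RELID_FIELD and
+ * READ_TYPID_FIELD are assumed to resolve a schema-qualified name back to
+ * an OID valid on the receiving node, since raw OIDs need not match
+ * across the nodes of a Postgres-XL cluster; without portable_input the
+ * OID emitted by the sender is used as-is.
+ */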
+
+
+/*
+ * _readSetOp
+ */
+static SetOp *
+_readSetOp(void)
+{
+ int i;
+ READ_PLAN_FIELDS(SetOp);
+
+ READ_ENUM_FIELD(cmd, SetOpCmd);
+ READ_ENUM_FIELD(strategy, SetOpStrategy);
+ READ_INT_FIELD(numCols);
+
+ token = pg_strtok(&length); /* skip :dupColIdx */
+ local_node->dupColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->dupColIdx[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :dupOperators */
+ local_node->dupOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->dupOperators[i] = atooid(token);
+ }
+
+ READ_INT_FIELD(flagColIdx);
+ READ_INT_FIELD(firstFlag);
+ READ_LONG_FIELD(numGroups);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readLimit
+ */
+static Limit *
+_readLimit(void)
+{
+ READ_PLAN_FIELDS(Limit);
+
+ READ_NODE_FIELD(limitOffset);
+ READ_NODE_FIELD(limitCount);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readRemoteSubplan
+ */
+static RemoteSubplan *
+_readRemoteSubplan(void)
+{
+ READ_SCAN_FIELDS(RemoteSubplan);
+
+ READ_CHAR_FIELD(distributionType);
+ READ_INT_FIELD(distributionKey);
+ READ_NODE_FIELD(distributionNodes);
+ READ_NODE_FIELD(distributionRestrict);
+ READ_NODE_FIELD(nodeList);
+ READ_BOOL_FIELD(execOnAll);
+ READ_NODE_FIELD(sort);
+ READ_STRING_FIELD(cursor);
+ READ_INT_FIELD(unique);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readRemoteStmt
+ */
+static RemoteStmt *
+_readRemoteStmt(void)
+{
+ int i;
+ READ_LOCALS(RemoteStmt);
+
+ READ_ENUM_FIELD(commandType, CmdType);
+ READ_BOOL_FIELD(hasReturning);
+ READ_NODE_FIELD(planTree);
+ READ_NODE_FIELD(rtable);
+ READ_NODE_FIELD(resultRelations);
+ READ_NODE_FIELD(subplans);
+ READ_INT_FIELD(nParamExec);
+ READ_INT_FIELD(nParamRemote);
+ if (local_node->nParamRemote > 0)
+ {
+ local_node->remoteparams = (RemoteParam *) palloc(
+ local_node->nParamRemote * sizeof(RemoteParam));
+ for (i = 0; i < local_node->nParamRemote; i++)
+ {
+ RemoteParam *rparam = &(local_node->remoteparams[i]);
+ token = pg_strtok(&length); /* skip :paramkind */
+ token = pg_strtok(&length);
+ rparam->paramkind = (ParamKind) atoi(token);
+
+ token = pg_strtok(&length); /* skip :paramid */
+ token = pg_strtok(&length);
+ rparam->paramid = atoi(token);
+
+ token = pg_strtok(&length); /* skip :paramused */
+ token = pg_strtok(&length);
+ rparam->paramused = atoi(token);
+
+ token = pg_strtok(&length); /* skip :paramtype */
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *typname; /* data type name */
+ token = pg_strtok(&length); /* get nspname */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get typname */
+ typname = nullable_string(token, length);
+ if (typname)
+ rparam->paramtype = get_typname_typid(typname,
+ NSP_OID(nspname));
+ else
+ rparam->paramtype = InvalidOid;
+ }
+ else
+ {
+ token = pg_strtok(&length);
+ rparam->paramtype = atooid(token);
+ }
+ }
+ }
+ else
+ local_node->remoteparams = NULL;
+
+ READ_NODE_FIELD(rowMarks);
+ READ_CHAR_FIELD(distributionType);
+ READ_INT_FIELD(distributionKey);
+ READ_NODE_FIELD(distributionNodes);
+ READ_NODE_FIELD(distributionRestrict);
+
+ READ_DONE();
+}
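+
+/*
+ * Illustrative sketch (not original source text): per the reads above,
+ * one RemoteParam arrives as labelled tokens, so a single parameter of
+ * type integer might look roughly like
+ *
+ *     :paramkind 0 :paramid 1 :paramused 1 :paramtype pg_catalog int4
+ *
+ * with the :paramtype value collapsing to a bare type OID when
+ * portable_input is not set.
+ */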
+
+
+/*
+ * _readSimpleSort
+ */
+static SimpleSort *
+_readSimpleSort(void)
+{
+ int i;
+ READ_LOCALS(SimpleSort);
+
+ READ_INT_FIELD(numCols);
+
+ token = pg_strtok(&length); /* skip :sortColIdx */
+ local_node->sortColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->sortColIdx[i] = atoi(token);
+ }
+
+ token = pg_strtok(&length); /* skip :sortOperators */
+ local_node->sortOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *oprname; /* operator name */
+ char *leftnspname; /* left type namespace */
+ char *leftname; /* left type name */
+ Oid oprleft; /* left type */
+ char *rightnspname; /* right type namespace */
+ char *rightname; /* right type name */
+ Oid oprright; /* right type */
+ /* token is already set to nspname */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get operator name */
+ oprname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type namespace */
+ leftnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* left type name */
+ leftname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type namespace */
+ rightnspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* right type name */
+ rightname = nullable_string(token, length);
+ if (leftname)
+ oprleft = get_typname_typid(leftname,
+ NSP_OID(leftnspname));
+ else
+ oprleft = InvalidOid;
+ if (rightname)
+ oprright = get_typname_typid(rightname,
+ NSP_OID(rightnspname));
+ else
+ oprright = InvalidOid;
+ local_node->sortOperators[i] = get_operid(oprname,
+ oprleft,
+ oprright,
+ NSP_OID(nspname));
+ }
+ else
+ local_node->sortOperators[i] = atooid(token);
+ }
+
+ token = pg_strtok(&length); /* skip :sortCollations */
+ local_node->sortCollations = (Oid *) palloc(local_node->numCols * sizeof(Oid));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ if (portable_input)
+ {
+ char *nspname; /* namespace name */
+ char *collname; /* collation name */
+ int collencoding; /* collation encoding */
+ /* the token is already read */
+ nspname = nullable_string(token, length);
+ token = pg_strtok(&length); /* get collname */
+ collname = nullable_string(token, length);
+			token = pg_strtok(&length); /* get collencoding */
+ collencoding = atoi(token);
+ if (collname)
+ local_node->sortCollations[i] = get_collid(collname,
+ collencoding,
+ NSP_OID(nspname));
+ else
+ local_node->sortCollations[i] = InvalidOid;
+ }
+ else
+ local_node->sortCollations[i] = atooid(token);
+ }
+
+ token = pg_strtok(&length); /* skip :nullsFirst */
+ local_node->nullsFirst = (bool *) palloc(local_node->numCols * sizeof(bool));
+ for (i = 0; i < local_node->numCols; i++)
+ {
+ token = pg_strtok(&length);
+ local_node->nullsFirst[i] = strtobool(token);
+ }
+
+ READ_DONE();
+}
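+
+/*
+ * Illustrative sketch (not original source text): with portable_input,
+ * each :sortCollations entry is read back as three tokens -- namespace,
+ * collation name, and collation encoding, e.g. "pg_catalog default -1" --
+ * which get_collid() resolves to a local OID; otherwise the entry is a
+ * bare collation OID.
+ */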
+
+
+/*
+ * _readNestLoopParam
+ */
+static NestLoopParam *
+_readNestLoopParam(void)
+{
+ READ_LOCALS(NestLoopParam);
+
+ READ_INT_FIELD(paramno);
+ READ_NODE_FIELD(paramval);
+
+ READ_DONE();
+}
+
+
+/*
+ * _readPlanRowMark
+ */
+static PlanRowMark *
+_readPlanRowMark(void)
+{
+ READ_LOCALS(PlanRowMark);
+
+ READ_UINT_FIELD(rti);
+ READ_UINT_FIELD(prti);
+ READ_UINT_FIELD(rowmarkId);
+ READ_ENUM_FIELD(markType, RowMarkType);
+ READ_INT_FIELD(allMarkTypes);
+ READ_ENUM_FIELD(strength, LockClauseStrength);
+ READ_ENUM_FIELD(waitPolicy, LockWaitPolicy);
+ READ_BOOL_FIELD(isParent);
+
+ READ_DONE();
+}
+
+/*
+ * _readLockRows
+ */
+static LockRows *
+_readLockRows(void)
+{
+ READ_PLAN_FIELDS(LockRows);
+
+ READ_NODE_FIELD(rowMarks);
+ READ_INT_FIELD(epqParam);
+
+ READ_DONE();
+}
+
+#endif /* XCP */
+
+
+/*
+ * parseNodeString
+ *
+ * Given a character string representing a node tree, parseNodeString creates
+ * the internal node structure.
+ *
+ * The string to be read must already have been loaded into pg_strtok().
+ */
+Node *
+parseNodeString(void)
+{
+ void *return_value;
+
+ READ_TEMP_LOCALS();
+
+ token = pg_strtok(&length);
+
+#define MATCH(tokname, namelen) \
+ (length == namelen && memcmp(token, tokname, namelen) == 0)
+
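+/*
+ * For example (illustrative only): if the serialized tree begins with
+ * "{SEQSCAN :startup_cost 0.00 ...}", pg_strtok() returns the token
+ * "SEQSCAN" with length 7, MATCH("SEQSCAN", 7) succeeds, and
+ * _readSeqScan() rebuilds the node.
+ */
+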
+ if (MATCH("QUERY", 5))
+ return_value = _readQuery();
+ else if (MATCH("WITHCHECKOPTION", 15))
+ return_value = _readWithCheckOption();
+ else if (MATCH("SORTGROUPCLAUSE", 15))
+ return_value = _readSortGroupClause();
+ else if (MATCH("GROUPINGSET", 11))
+ return_value = _readGroupingSet();
+ else if (MATCH("WINDOWCLAUSE", 12))
+ return_value = _readWindowClause();
+ else if (MATCH("ROWMARKCLAUSE", 13))
+ return_value = _readRowMarkClause();
+ else if (MATCH("COMMONTABLEEXPR", 15))
+ return_value = _readCommonTableExpr();
+ else if (MATCH("SETOPERATIONSTMT", 16))
+ return_value = _readSetOperationStmt();
+ else if (MATCH("ALIAS", 5))
+ return_value = _readAlias();
+ else if (MATCH("RANGEVAR", 8))
+ return_value = _readRangeVar();
+ else if (MATCH("INTOCLAUSE", 10))
+ return_value = _readIntoClause();
+ else if (MATCH("VAR", 3))
+ return_value = _readVar();
+ else if (MATCH("CONST", 5))
+ return_value = _readConst();
+ else if (MATCH("PARAM", 5))
+ return_value = _readParam();
+ else if (MATCH("AGGREF", 6))
+ return_value = _readAggref();
+ else if (MATCH("GROUPINGFUNC", 12))
+ return_value = _readGroupingFunc();
+ else if (MATCH("WINDOWFUNC", 10))
+ return_value = _readWindowFunc();
+ else if (MATCH("ARRAYREF", 8))
+ return_value = _readArrayRef();
+ else if (MATCH("FUNCEXPR", 8))
+ return_value = _readFuncExpr();
+ else if (MATCH("NAMEDARGEXPR", 12))
+ return_value = _readNamedArgExpr();
+ else if (MATCH("OPEXPR", 6))
+ return_value = _readOpExpr();
+ else if (MATCH("DISTINCTEXPR", 12))
+ return_value = _readDistinctExpr();
+ else if (MATCH("NULLIFEXPR", 10))
+ return_value = _readNullIfExpr();
+ else if (MATCH("SCALARARRAYOPEXPR", 17))
+ return_value = _readScalarArrayOpExpr();
+ else if (MATCH("BOOLEXPR", 8))
+ return_value = _readBoolExpr();
+ else if (MATCH("SUBLINK", 7))
+ return_value = _readSubLink();
+#ifdef XCP
+ else if (MATCH("SUBPLAN", 7))
+ return_value = _readSubPlan();
+#endif
+ else if (MATCH("FIELDSELECT", 11))
+ return_value = _readFieldSelect();
+ else if (MATCH("FIELDSTORE", 10))
+ return_value = _readFieldStore();
+ else if (MATCH("RELABELTYPE", 11))
+ return_value = _readRelabelType();
+ else if (MATCH("COERCEVIAIO", 11))
+ return_value = _readCoerceViaIO();
+ else if (MATCH("ARRAYCOERCEEXPR", 15))
+ return_value = _readArrayCoerceExpr();
+ else if (MATCH("CONVERTROWTYPEEXPR", 18))
+ return_value = _readConvertRowtypeExpr();
+ else if (MATCH("COLLATE", 7))
+ return_value = _readCollateExpr();
+ else if (MATCH("CASE", 4))
+ return_value = _readCaseExpr();
+ else if (MATCH("WHEN", 4))
+ return_value = _readCaseWhen();
+ else if (MATCH("CASETESTEXPR", 12))
+ return_value = _readCaseTestExpr();
+ else if (MATCH("ARRAY", 5))
+ return_value = _readArrayExpr();
+ else if (MATCH("ROW", 3))
+ return_value = _readRowExpr();
+ else if (MATCH("ROWCOMPARE", 10))
+ return_value = _readRowCompareExpr();
+ else if (MATCH("COALESCE", 8))
+ return_value = _readCoalesceExpr();
+ else if (MATCH("MINMAX", 6))
+ return_value = _readMinMaxExpr();
+ else if (MATCH("XMLEXPR", 7))
+ return_value = _readXmlExpr();
+ else if (MATCH("NULLTEST", 8))
+ return_value = _readNullTest();
+ else if (MATCH("BOOLEANTEST", 11))
+ return_value = _readBooleanTest();
+ else if (MATCH("COERCETODOMAIN", 14))
+ return_value = _readCoerceToDomain();
+ else if (MATCH("COERCETODOMAINVALUE", 19))
+ return_value = _readCoerceToDomainValue();
+ else if (MATCH("SETTODEFAULT", 12))
+ return_value = _readSetToDefault();
+ else if (MATCH("CURRENTOFEXPR", 13))
+ return_value = _readCurrentOfExpr();
+ else if (MATCH("INFERENCEELEM", 13))
+ return_value = _readInferenceElem();
+ else if (MATCH("TARGETENTRY", 11))
+ return_value = _readTargetEntry();
+ else if (MATCH("RANGETBLREF", 11))
+ return_value = _readRangeTblRef();
+ else if (MATCH("JOINEXPR", 8))
+ return_value = _readJoinExpr();
+ else if (MATCH("FROMEXPR", 8))
+ return_value = _readFromExpr();
+ else if (MATCH("ONCONFLICTEXPR", 14))
+ return_value = _readOnConflictExpr();
+ else if (MATCH("RTE", 3))
+ return_value = _readRangeTblEntry();
+ else if (MATCH("RANGETBLFUNCTION", 16))
+ return_value = _readRangeTblFunction();
+ else if (MATCH("TABLESAMPLECLAUSE", 17))
+ return_value = _readTableSampleClause();
+ else if (MATCH("NOTIFY", 6))
+ return_value = _readNotifyStmt();
+ else if (MATCH("DECLARECURSOR", 13))
+ return_value = _readDeclareCursorStmt();
+#ifdef XCP
+ else if (MATCH("PLAN", 4))
+ return_value = _readPlan();
+ else if (MATCH("RESULT", 6))
+ return_value = _readResult();
+ else if (MATCH("MODIFYTABLE", 11))
+ return_value = _readModifyTable();
+ else if (MATCH("APPEND", 6))
+ return_value = _readAppend();
+ else if (MATCH("MERGEAPPEND", 11))
+ return_value = _readMergeAppend();
+ else if (MATCH("RECURSIVEUNION", 14))
+ return_value = _readRecursiveUnion();
+ else if (MATCH("BITMAPAND", 9))
+ return_value = _readBitmapAnd();
+ else if (MATCH("BITMAPOR", 8))
+ return_value = _readBitmapOr();
+ else if (MATCH("SCAN", 4))
+ return_value = _readScan();
+ else if (MATCH("SEQSCAN", 7))
+ return_value = _readSeqScan();
+ else if (MATCH("SAMPLESCAN", 10))
+ return_value = _readSampleScan();
+ else if (MATCH("INDEXSCAN", 9))
+ return_value = _readIndexScan();
+ else if (MATCH("INDEXONLYSCAN", 13))
+ return_value = _readIndexOnlyScan();
+ else if (MATCH("BITMAPINDEXSCAN", 15))
+ return_value = _readBitmapIndexScan();
+ else if (MATCH("BITMAPHEAPSCAN", 14))
+ return_value = _readBitmapHeapScan();
+ else if (MATCH("TIDSCAN", 7))
+ return_value = _readTidScan();
+ else if (MATCH("SUBQUERYSCAN", 12))
+ return_value = _readSubqueryScan();
+ else if (MATCH("FUNCTIONSCAN", 12))
+ return_value = _readFunctionScan();
+ else if (MATCH("VALUESSCAN", 10))
+ return_value = _readValuesScan();
+ else if (MATCH("CTESCAN", 7))
+ return_value = _readCteScan();
+ else if (MATCH("WORKTABLESCAN", 13))
+ return_value = _readWorkTableScan();
+ else if (MATCH("JOIN", 4))
+ return_value = _readJoin();
+ else if (MATCH("NESTLOOP", 8))
+ return_value = _readNestLoop();
+ else if (MATCH("MERGEJOIN", 9))
+ return_value = _readMergeJoin();
+ else if (MATCH("HASHJOIN", 8))
+ return_value = _readHashJoin();
+ else if (MATCH("MATERIAL", 8))
+ return_value = _readMaterial();
+ else if (MATCH("SORT", 4))
+ return_value = _readSort();
+ else if (MATCH("GROUP", 5))
+ return_value = _readGroup();
+ else if (MATCH("AGG", 3))
+ return_value = _readAgg();
+ else if (MATCH("WINDOWAGG", 9))
+ return_value = _readWindowAgg();
+ else if (MATCH("UNIQUE", 6))
+ return_value = _readUnique();
+ else if (MATCH("HASH", 4))
+ return_value = _readHash();
+ else if (MATCH("SETOP", 5))
+ return_value = _readSetOp();
+ else if (MATCH("LIMIT", 5))
+ return_value = _readLimit();
+ else if (MATCH("REMOTESUBPLAN", 13))
+ return_value = _readRemoteSubplan();
+ else if (MATCH("REMOTESTMT", 10))
+ return_value = _readRemoteStmt();
+ else if (MATCH("SIMPLESORT", 10))
+ return_value = _readSimpleSort();
+ else if (MATCH("NESTLOOPPARAM", 13))
+ return_value = _readNestLoopParam();
+ else if (MATCH("PLANROWMARK", 11))
+ return_value = _readPlanRowMark();
+ else if (MATCH("LOCKROWS", 8))
+ return_value = _readLockRows();
+#endif
+ else
+ {
+ elog(ERROR, "badly formatted node string \"%.32s\"...", token);
+ return_value = NULL; /* keep compiler quiet */
+ }
++=======
++ * _readDefElem
++ */
++static DefElem *
++_readDefElem(void)
++{
++ READ_LOCALS(DefElem);
+
- return (Node *) return_value;
++ READ_STRING_FIELD(defnamespace);
++ READ_STRING_FIELD(defname);
++ READ_NODE_FIELD(arg);
++ READ_ENUM_FIELD(defaction, DefElemAction);
++
++ READ_DONE();
+}
+
++/*
++ * _readPlannedStmt
++ */
++static PlannedStmt *
++_readPlannedStmt(void)
++{
++ READ_LOCALS(PlannedStmt);
++
++ READ_ENUM_FIELD(commandType, CmdType);
++ READ_UINT_FIELD(queryId);
++ READ_BOOL_FIELD(hasReturning);
++ READ_BOOL_FIELD(hasModifyingCTE);
++ READ_BOOL_FIELD(canSetTag);
++ READ_BOOL_FIELD(transientPlan);
++ READ_BOOL_FIELD(dependsOnRole);
++ READ_BOOL_FIELD(parallelModeNeeded);
++ READ_NODE_FIELD(planTree);
++ READ_NODE_FIELD(rtable);
++ READ_NODE_FIELD(resultRelations);
++ READ_NODE_FIELD(utilityStmt);
++ READ_NODE_FIELD(subplans);
++ READ_BITMAPSET_FIELD(rewindPlanIDs);
++ READ_NODE_FIELD(rowMarks);
++ READ_NODE_FIELD(relationOids);
++ READ_NODE_FIELD(invalItems);
++ READ_INT_FIELD(nParamExec);
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
++
++ READ_DONE();
++}
+
+/*
- * readDatum
- *
- * Given a string representation of a constant, recreate the appropriate
- * Datum. The string representation embeds length info, but not byValue,
- * so we must be told that.
++ * ReadCommonPlan
++ * Assign the basic stuff of all nodes that inherit from Plan
+ */
- static Datum
- readDatum(bool typbyval)
++static void
++ReadCommonPlan(Plan *local_node)
+{
- Size length,
- i;
- int tokenLength;
- char *token;
- Datum res;
- char *s;
++ READ_TEMP_LOCALS();
+
- /*
- * read the actual length of the value
- */
- token = pg_strtok(&tokenLength);
- length = atoui(token);
++ READ_FLOAT_FIELD(startup_cost);
++ READ_FLOAT_FIELD(total_cost);
++ READ_FLOAT_FIELD(plan_rows);
++ READ_INT_FIELD(plan_width);
++ READ_BOOL_FIELD(parallel_aware);
++ READ_INT_FIELD(plan_node_id);
++ READ_NODE_FIELD(targetlist);
++ READ_NODE_FIELD(qual);
++ READ_NODE_FIELD(lefttree);
++ READ_NODE_FIELD(righttree);
++ READ_NODE_FIELD(initPlan);
++ READ_BITMAPSET_FIELD(extParam);
++ READ_BITMAPSET_FIELD(allParam);
++}
+
- token = pg_strtok(&tokenLength); /* read the '[' */
- if (token == NULL || token[0] != '[')
- elog(ERROR, "expected \"[\" to start datum, but got \"%s\"; length = %zu",
- token ? (const char *) token : "[NULL]", length);
++/*
++ * _readPlan
++ */
++static Plan *
++_readPlan(void)
++{
++ READ_LOCALS_NO_FIELDS(Plan);
+
- if (typbyval)
- {
- if (length > (Size) sizeof(Datum))
- elog(ERROR, "byval datum but length = %zu", length);
- res = (Datum) 0;
- s = (char *) (&res);
- for (i = 0; i < (Size) sizeof(Datum); i++)
- {
- token = pg_strtok(&tokenLength);
- s[i] = (char) atoi(token);
- }
- }
- else if (length <= 0)
- res = (Datum) NULL;
- else
- {
- s = (char *) palloc(length);
- for (i = 0; i < length; i++)
- {
- token = pg_strtok(&tokenLength);
- s[i] = (char) atoi(token);
- }
- res = PointerGetDatum(s);
- }
++ ReadCommonPlan(local_node);
+
- token = pg_strtok(&tokenLength); /* read the ']' */
- if (token == NULL || token[0] != ']')
- elog(ERROR, "expected \"]\" to end datum, but got \"%s\"; length = %zu",
- token ? (const char *) token : "[NULL]", length);
++ READ_DONE();
++}
+
- return res;
++/*
++ * _readResult
++ */
++static Result *
++_readResult(void)
++{
++ READ_LOCALS(Result);
++
++ ReadCommonPlan(&local_node->plan);
++
++ READ_NODE_FIELD(resconstantqual);
++
++ READ_DONE();
+}
+
- #ifdef XCP
+/*
- * scanDatum
- *
- * Recreate Datum from the text format understandable by the input function
- * of the specified data type.
++ * _readModifyTable
+ */
- static Datum
- scanDatum(Oid typid, int typmod)
++static ModifyTable *
++_readModifyTable(void)
+{
- Oid typInput;
- Oid typioparam;
- FmgrInfo finfo;
- FunctionCallInfoData fcinfo;
- char *value;
- Datum res;
- READ_TEMP_LOCALS();
++ READ_LOCALS(ModifyTable);
+
- /* Get input function for the type */
- getTypeInputInfo(typid, &typInput, &typioparam);
- fmgr_info(typInput, &finfo);
++ ReadCommonPlan(&local_node->plan);
+
- /* Read the value */
- token = pg_strtok(&length);
- value = nullable_string(token, length);
++ READ_ENUM_FIELD(operation, CmdType);
++ READ_BOOL_FIELD(canSetTag);
++ READ_UINT_FIELD(nominalRelation);
++ READ_NODE_FIELD(resultRelations);
++ READ_INT_FIELD(resultRelIndex);
++ READ_NODE_FIELD(plans);
++ READ_NODE_FIELD(withCheckOptionLists);
++ READ_NODE_FIELD(returningLists);
++ READ_NODE_FIELD(fdwPrivLists);
++ READ_BITMAPSET_FIELD(fdwDirectModifyPlans);
++ READ_NODE_FIELD(rowMarks);
++ READ_INT_FIELD(epqParam);
++ READ_ENUM_FIELD(onConflictAction, OnConflictAction);
++ READ_NODE_FIELD(arbiterIndexes);
++ READ_NODE_FIELD(onConflictSet);
++ READ_NODE_FIELD(onConflictWhere);
++ READ_UINT_FIELD(exclRelRTI);
++ READ_NODE_FIELD(exclRelTlist);
+
- /* The value can not be NULL, so we actually received empty string */
- if (value == NULL)
- value = "";
++ READ_DONE();
++}
+
- /* Invoke input function */
- InitFunctionCallInfoData(fcinfo, &finfo, 3, InvalidOid, NULL, NULL);
++/*
++ * _readAppend
++ */
++static Append *
++_readAppend(void)
++{
++ READ_LOCALS(Append);
+
- fcinfo.arg[0] = CStringGetDatum(value);
- fcinfo.arg[1] = ObjectIdGetDatum(typioparam);
- fcinfo.arg[2] = Int32GetDatum(typmod);
- fcinfo.argnull[0] = false;
- fcinfo.argnull[1] = false;
- fcinfo.argnull[2] = false;
++ ReadCommonPlan(&local_node->plan);
+
- res = FunctionCallInvoke(&fcinfo);
++ READ_NODE_FIELD(appendplans);
+
- return res;
++ READ_DONE();
+}
- #endif
++
++/*
++ * _readMergeAppend
++ */
++static MergeAppend *
++_readMergeAppend(void)
++{
++ READ_LOCALS(MergeAppend);
++
++ ReadCommonPlan(&local_node->plan);
++
++ READ_NODE_FIELD(mergeplans);
++ READ_INT_FIELD(numCols);
++ READ_ATTRNUMBER_ARRAY(sortColIdx, local_node->numCols);
++ READ_OID_ARRAY(sortOperators, local_node->numCols);
++ READ_OID_ARRAY(collations, local_node->numCols);
++ READ_BOOL_ARRAY(nullsFirst, local_node->numCols);
++
++ READ_DONE();
++}
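++
++/*
++ * A minimal sketch (assuming the 9.6 readfuncs.h definitions): the
++ * READ_*_ARRAY macros used above delegate to the array readers defined
++ * near the end of this file, roughly as
++ *
++ *     #define READ_ATTRNUMBER_ARRAY(fldname, len) \
++ *         token = pg_strtok(&length);             \
++ *         local_node->fldname = readAttrNumberCols(len)
++ *
++ * and likewise READ_OID_ARRAY -> readOidCols(), READ_INT_ARRAY ->
++ * readIntCols(), READ_BOOL_ARRAY -> readBoolCols().
++ */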
++
++/*
++ * _readRecursiveUnion
++ */
++static RecursiveUnion *
++_readRecursiveUnion(void)
++{
++ READ_LOCALS(RecursiveUnion);
++
++ ReadCommonPlan(&local_node->plan);
++
++ READ_INT_FIELD(wtParam);
++ READ_INT_FIELD(numCols);
++ READ_ATTRNUMBER_ARRAY(dupColIdx, local_node->numCols);
++ READ_OID_ARRAY(dupOperators, local_node->numCols);
++ READ_LONG_FIELD(numGroups);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readBitmapAnd
+ */
+ static BitmapAnd *
+ _readBitmapAnd(void)
+ {
+ READ_LOCALS(BitmapAnd);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_NODE_FIELD(bitmapplans);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readBitmapOr
+ */
+ static BitmapOr *
+ _readBitmapOr(void)
+ {
+ READ_LOCALS(BitmapOr);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_NODE_FIELD(bitmapplans);
+
+ READ_DONE();
+ }
+
+ /*
+ * ReadCommonScan
+ * Assign the basic stuff of all nodes that inherit from Scan
+ */
+ static void
+ ReadCommonScan(Scan *local_node)
+ {
+ READ_TEMP_LOCALS();
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_UINT_FIELD(scanrelid);
+ }
+
+ /*
+ * _readScan
+ */
+ static Scan *
+ _readScan(void)
+ {
+ READ_LOCALS_NO_FIELDS(Scan);
+
+ ReadCommonScan(local_node);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readSeqScan
+ */
+ static SeqScan *
+ _readSeqScan(void)
+ {
+ READ_LOCALS_NO_FIELDS(SeqScan);
+
+ ReadCommonScan(local_node);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readSampleScan
+ */
+ static SampleScan *
+ _readSampleScan(void)
+ {
+ READ_LOCALS(SampleScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_NODE_FIELD(tablesample);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readIndexScan
+ */
+ static IndexScan *
+ _readIndexScan(void)
+ {
+ READ_LOCALS(IndexScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_OID_FIELD(indexid);
+ READ_NODE_FIELD(indexqual);
+ READ_NODE_FIELD(indexqualorig);
+ READ_NODE_FIELD(indexorderby);
+ READ_NODE_FIELD(indexorderbyorig);
+ READ_NODE_FIELD(indexorderbyops);
+ READ_ENUM_FIELD(indexorderdir, ScanDirection);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readIndexOnlyScan
+ */
+ static IndexOnlyScan *
+ _readIndexOnlyScan(void)
+ {
+ READ_LOCALS(IndexOnlyScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_OID_FIELD(indexid);
+ READ_NODE_FIELD(indexqual);
+ READ_NODE_FIELD(indexorderby);
+ READ_NODE_FIELD(indextlist);
+ READ_ENUM_FIELD(indexorderdir, ScanDirection);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readBitmapIndexScan
+ */
+ static BitmapIndexScan *
+ _readBitmapIndexScan(void)
+ {
+ READ_LOCALS(BitmapIndexScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_OID_FIELD(indexid);
+ READ_NODE_FIELD(indexqual);
+ READ_NODE_FIELD(indexqualorig);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readBitmapHeapScan
+ */
+ static BitmapHeapScan *
+ _readBitmapHeapScan(void)
+ {
+ READ_LOCALS(BitmapHeapScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_NODE_FIELD(bitmapqualorig);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readTidScan
+ */
+ static TidScan *
+ _readTidScan(void)
+ {
+ READ_LOCALS(TidScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_NODE_FIELD(tidquals);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readSubqueryScan
+ */
+ static SubqueryScan *
+ _readSubqueryScan(void)
+ {
+ READ_LOCALS(SubqueryScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_NODE_FIELD(subplan);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readFunctionScan
+ */
+ static FunctionScan *
+ _readFunctionScan(void)
+ {
+ READ_LOCALS(FunctionScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_NODE_FIELD(functions);
+ READ_BOOL_FIELD(funcordinality);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readValuesScan
+ */
+ static ValuesScan *
+ _readValuesScan(void)
+ {
+ READ_LOCALS(ValuesScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_NODE_FIELD(values_lists);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readCteScan
+ */
+ static CteScan *
+ _readCteScan(void)
+ {
+ READ_LOCALS(CteScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_INT_FIELD(ctePlanId);
+ READ_INT_FIELD(cteParam);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readWorkTableScan
+ */
+ static WorkTableScan *
+ _readWorkTableScan(void)
+ {
+ READ_LOCALS(WorkTableScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_INT_FIELD(wtParam);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readForeignScan
+ */
+ static ForeignScan *
+ _readForeignScan(void)
+ {
+ READ_LOCALS(ForeignScan);
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_ENUM_FIELD(operation, CmdType);
+ READ_OID_FIELD(fs_server);
+ READ_NODE_FIELD(fdw_exprs);
+ READ_NODE_FIELD(fdw_private);
+ READ_NODE_FIELD(fdw_scan_tlist);
+ READ_NODE_FIELD(fdw_recheck_quals);
+ READ_BITMAPSET_FIELD(fs_relids);
+ READ_BOOL_FIELD(fsSystemCol);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readCustomScan
+ */
+ static CustomScan *
+ _readCustomScan(void)
+ {
+ READ_LOCALS(CustomScan);
+ char *custom_name;
+ const CustomScanMethods *methods;
+
+ ReadCommonScan(&local_node->scan);
+
+ READ_UINT_FIELD(flags);
+ READ_NODE_FIELD(custom_plans);
+ READ_NODE_FIELD(custom_exprs);
+ READ_NODE_FIELD(custom_private);
+ READ_NODE_FIELD(custom_scan_tlist);
+ READ_BITMAPSET_FIELD(custom_relids);
+
+ /* Lookup CustomScanMethods by CustomName */
+ token = pg_strtok(&length); /* skip methods: */
+ token = pg_strtok(&length); /* CustomName */
+ custom_name = nullable_string(token, length);
+ methods = GetCustomScanMethods(custom_name, false);
+ local_node->methods = methods;
+
+ READ_DONE();
+ }
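+
+ /*
+  * Illustrative note (not original source text): the :methods field is
+  * serialized as the provider's CustomName, so the lookup above succeeds
+  * only if the provider has already registered a CustomScanMethods struct
+  * of that name in this backend (via RegisterCustomScanMethods);
+  * GetCustomScanMethods(..., false) errors out otherwise.
+  */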
+
+ /*
+ * ReadCommonJoin
+ * Assign the basic stuff of all nodes that inherit from Join
+ */
+ static void
+ ReadCommonJoin(Join *local_node)
+ {
+ READ_TEMP_LOCALS();
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_ENUM_FIELD(jointype, JoinType);
+ READ_NODE_FIELD(joinqual);
+ }
+
+ /*
+ * _readJoin
+ */
+ static Join *
+ _readJoin(void)
+ {
+ READ_LOCALS_NO_FIELDS(Join);
+
+ ReadCommonJoin(local_node);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readNestLoop
+ */
+ static NestLoop *
+ _readNestLoop(void)
+ {
+ READ_LOCALS(NestLoop);
+
+ ReadCommonJoin(&local_node->join);
+
+ READ_NODE_FIELD(nestParams);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readMergeJoin
+ */
+ static MergeJoin *
+ _readMergeJoin(void)
+ {
+ int numCols;
+
+ READ_LOCALS(MergeJoin);
+
+ ReadCommonJoin(&local_node->join);
+
+ READ_NODE_FIELD(mergeclauses);
+
+ numCols = list_length(local_node->mergeclauses);
+
+ READ_OID_ARRAY(mergeFamilies, numCols);
+ READ_OID_ARRAY(mergeCollations, numCols);
+ READ_INT_ARRAY(mergeStrategies, numCols);
+ READ_BOOL_ARRAY(mergeNullsFirst, numCols);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readHashJoin
+ */
+ static HashJoin *
+ _readHashJoin(void)
+ {
+ READ_LOCALS(HashJoin);
+
+ ReadCommonJoin(&local_node->join);
+
+ READ_NODE_FIELD(hashclauses);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readMaterial
+ */
+ static Material *
+ _readMaterial(void)
+ {
+ READ_LOCALS_NO_FIELDS(Material);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readSort
+ */
+ static Sort *
+ _readSort(void)
+ {
+ READ_LOCALS(Sort);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_INT_FIELD(numCols);
+ READ_ATTRNUMBER_ARRAY(sortColIdx, local_node->numCols);
+ READ_OID_ARRAY(sortOperators, local_node->numCols);
+ READ_OID_ARRAY(collations, local_node->numCols);
+ READ_BOOL_ARRAY(nullsFirst, local_node->numCols);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readGroup
+ */
+ static Group *
+ _readGroup(void)
+ {
+ READ_LOCALS(Group);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_INT_FIELD(numCols);
+ READ_ATTRNUMBER_ARRAY(grpColIdx, local_node->numCols);
+ READ_OID_ARRAY(grpOperators, local_node->numCols);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readAgg
+ */
+ static Agg *
+ _readAgg(void)
+ {
+ READ_LOCALS(Agg);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_ENUM_FIELD(aggstrategy, AggStrategy);
+ READ_ENUM_FIELD(aggsplit, AggSplit);
+ READ_INT_FIELD(numCols);
+ READ_ATTRNUMBER_ARRAY(grpColIdx, local_node->numCols);
+ READ_OID_ARRAY(grpOperators, local_node->numCols);
+ READ_LONG_FIELD(numGroups);
+ READ_NODE_FIELD(groupingSets);
+ READ_NODE_FIELD(chain);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readWindowAgg
+ */
+ static WindowAgg *
+ _readWindowAgg(void)
+ {
+ READ_LOCALS(WindowAgg);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_UINT_FIELD(winref);
+ READ_INT_FIELD(partNumCols);
+ READ_ATTRNUMBER_ARRAY(partColIdx, local_node->partNumCols);
+ READ_OID_ARRAY(partOperators, local_node->partNumCols);
+ READ_INT_FIELD(ordNumCols);
+ READ_ATTRNUMBER_ARRAY(ordColIdx, local_node->ordNumCols);
+ READ_OID_ARRAY(ordOperators, local_node->ordNumCols);
+ READ_INT_FIELD(frameOptions);
+ READ_NODE_FIELD(startOffset);
+ READ_NODE_FIELD(endOffset);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readUnique
+ */
+ static Unique *
+ _readUnique(void)
+ {
+ READ_LOCALS(Unique);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_INT_FIELD(numCols);
+ READ_ATTRNUMBER_ARRAY(uniqColIdx, local_node->numCols);
+ READ_OID_ARRAY(uniqOperators, local_node->numCols);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readGather
+ */
+ static Gather *
+ _readGather(void)
+ {
+ READ_LOCALS(Gather);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_INT_FIELD(num_workers);
+ READ_BOOL_FIELD(single_copy);
+ READ_BOOL_FIELD(invisible);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readHash
+ */
+ static Hash *
+ _readHash(void)
+ {
+ READ_LOCALS(Hash);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_OID_FIELD(skewTable);
+ READ_INT_FIELD(skewColumn);
+ READ_BOOL_FIELD(skewInherit);
+ READ_OID_FIELD(skewColType);
+ READ_INT_FIELD(skewColTypmod);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readSetOp
+ */
+ static SetOp *
+ _readSetOp(void)
+ {
+ READ_LOCALS(SetOp);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_ENUM_FIELD(cmd, SetOpCmd);
+ READ_ENUM_FIELD(strategy, SetOpStrategy);
+ READ_INT_FIELD(numCols);
+ READ_ATTRNUMBER_ARRAY(dupColIdx, local_node->numCols);
+ READ_OID_ARRAY(dupOperators, local_node->numCols);
+ READ_INT_FIELD(flagColIdx);
+ READ_INT_FIELD(firstFlag);
+ READ_LONG_FIELD(numGroups);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readLockRows
+ */
+ static LockRows *
+ _readLockRows(void)
+ {
+ READ_LOCALS(LockRows);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_NODE_FIELD(rowMarks);
+ READ_INT_FIELD(epqParam);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readLimit
+ */
+ static Limit *
+ _readLimit(void)
+ {
+ READ_LOCALS(Limit);
+
+ ReadCommonPlan(&local_node->plan);
+
+ READ_NODE_FIELD(limitOffset);
+ READ_NODE_FIELD(limitCount);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readNestLoopParam
+ */
+ static NestLoopParam *
+ _readNestLoopParam(void)
+ {
+ READ_LOCALS(NestLoopParam);
+
+ READ_INT_FIELD(paramno);
+ READ_NODE_FIELD(paramval);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readPlanRowMark
+ */
+ static PlanRowMark *
+ _readPlanRowMark(void)
+ {
+ READ_LOCALS(PlanRowMark);
+
+ READ_UINT_FIELD(rti);
+ READ_UINT_FIELD(prti);
+ READ_UINT_FIELD(rowmarkId);
+ READ_ENUM_FIELD(markType, RowMarkType);
+ READ_INT_FIELD(allMarkTypes);
+ READ_ENUM_FIELD(strength, LockClauseStrength);
+ READ_ENUM_FIELD(waitPolicy, LockWaitPolicy);
+ READ_BOOL_FIELD(isParent);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readPlanInvalItem
+ */
+ static PlanInvalItem *
+ _readPlanInvalItem(void)
+ {
+ READ_LOCALS(PlanInvalItem);
+
+ READ_INT_FIELD(cacheId);
+ READ_UINT_FIELD(hashValue);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readSubPlan
+ */
+ static SubPlan *
+ _readSubPlan(void)
+ {
+ READ_LOCALS(SubPlan);
+
+ READ_ENUM_FIELD(subLinkType, SubLinkType);
+ READ_NODE_FIELD(testexpr);
+ READ_NODE_FIELD(paramIds);
+ READ_INT_FIELD(plan_id);
+ READ_STRING_FIELD(plan_name);
+ READ_OID_FIELD(firstColType);
+ READ_INT_FIELD(firstColTypmod);
+ READ_OID_FIELD(firstColCollation);
+ READ_BOOL_FIELD(useHashTable);
+ READ_BOOL_FIELD(unknownEqFalse);
+ READ_NODE_FIELD(setParam);
+ READ_NODE_FIELD(parParam);
+ READ_NODE_FIELD(args);
+ READ_FLOAT_FIELD(startup_cost);
+ READ_FLOAT_FIELD(per_call_cost);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readAlternativeSubPlan
+ */
+ static AlternativeSubPlan *
+ _readAlternativeSubPlan(void)
+ {
+ READ_LOCALS(AlternativeSubPlan);
+
+ READ_NODE_FIELD(subplans);
+
+ READ_DONE();
+ }
+
+ /*
+ * _readExtensibleNode
+ */
+ static ExtensibleNode *
+ _readExtensibleNode(void)
+ {
+ const ExtensibleNodeMethods *methods;
+ ExtensibleNode *local_node;
+ const char *extnodename;
+
+ READ_TEMP_LOCALS();
+
+ token = pg_strtok(&length); /* skip :extnodename */
+ token = pg_strtok(&length); /* get extnodename */
+
+ extnodename = nullable_string(token, length);
+ if (!extnodename)
+ elog(ERROR, "extnodename has to be supplied");
+ methods = GetExtensibleNodeMethods(extnodename, false);
+
+ local_node = (ExtensibleNode *) newNode(methods->node_size,
+ T_ExtensibleNode);
+ local_node->extnodename = extnodename;
+
+ /* deserialize the private fields */
+ methods->nodeRead(local_node);
+
+ READ_DONE();
+ }
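+
+ /*
+  * A minimal usage sketch (hypothetical extension code, not part of this
+  * file): for the lookup above to succeed, an extension must first have
+  * registered its methods in this backend, along the lines of
+  *
+  *     static const ExtensibleNodeMethods my_methods = {
+  *         "my_node",        -- matched against :extnodename
+  *         sizeof(MyNode),   -- node_size consumed by newNode() above
+  *         my_copy, my_equal, my_out, my_read
+  *     };
+  *     RegisterExtensibleNodeMethods(&my_methods);
+  */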
+
+ /*
+ * parseNodeString
+ *
+ * Given a character string representing a node tree, parseNodeString creates
+ * the internal node structure.
+ *
+ * The string to be read must already have been loaded into pg_strtok().
+ */
+ Node *
+ parseNodeString(void)
+ {
+ void *return_value;
+
+ READ_TEMP_LOCALS();
+
+ token = pg_strtok(&length);
+
+ #define MATCH(tokname, namelen) \
+ (length == namelen && memcmp(token, tokname, namelen) == 0)
+
+ if (MATCH("QUERY", 5))
+ return_value = _readQuery();
+ else if (MATCH("WITHCHECKOPTION", 15))
+ return_value = _readWithCheckOption();
+ else if (MATCH("SORTGROUPCLAUSE", 15))
+ return_value = _readSortGroupClause();
+ else if (MATCH("GROUPINGSET", 11))
+ return_value = _readGroupingSet();
+ else if (MATCH("WINDOWCLAUSE", 12))
+ return_value = _readWindowClause();
+ else if (MATCH("ROWMARKCLAUSE", 13))
+ return_value = _readRowMarkClause();
+ else if (MATCH("COMMONTABLEEXPR", 15))
+ return_value = _readCommonTableExpr();
+ else if (MATCH("SETOPERATIONSTMT", 16))
+ return_value = _readSetOperationStmt();
+ else if (MATCH("ALIAS", 5))
+ return_value = _readAlias();
+ else if (MATCH("RANGEVAR", 8))
+ return_value = _readRangeVar();
+ else if (MATCH("INTOCLAUSE", 10))
+ return_value = _readIntoClause();
+ else if (MATCH("VAR", 3))
+ return_value = _readVar();
+ else if (MATCH("CONST", 5))
+ return_value = _readConst();
+ else if (MATCH("PARAM", 5))
+ return_value = _readParam();
+ else if (MATCH("AGGREF", 6))
+ return_value = _readAggref();
+ else if (MATCH("GROUPINGFUNC", 12))
+ return_value = _readGroupingFunc();
+ else if (MATCH("WINDOWFUNC", 10))
+ return_value = _readWindowFunc();
+ else if (MATCH("ARRAYREF", 8))
+ return_value = _readArrayRef();
+ else if (MATCH("FUNCEXPR", 8))
+ return_value = _readFuncExpr();
+ else if (MATCH("NAMEDARGEXPR", 12))
+ return_value = _readNamedArgExpr();
+ else if (MATCH("OPEXPR", 6))
+ return_value = _readOpExpr();
+ else if (MATCH("DISTINCTEXPR", 12))
+ return_value = _readDistinctExpr();
+ else if (MATCH("NULLIFEXPR", 10))
+ return_value = _readNullIfExpr();
+ else if (MATCH("SCALARARRAYOPEXPR", 17))
+ return_value = _readScalarArrayOpExpr();
+ else if (MATCH("BOOLEXPR", 8))
+ return_value = _readBoolExpr();
+ else if (MATCH("SUBLINK", 7))
+ return_value = _readSubLink();
+ else if (MATCH("FIELDSELECT", 11))
+ return_value = _readFieldSelect();
+ else if (MATCH("FIELDSTORE", 10))
+ return_value = _readFieldStore();
+ else if (MATCH("RELABELTYPE", 11))
+ return_value = _readRelabelType();
+ else if (MATCH("COERCEVIAIO", 11))
+ return_value = _readCoerceViaIO();
+ else if (MATCH("ARRAYCOERCEEXPR", 15))
+ return_value = _readArrayCoerceExpr();
+ else if (MATCH("CONVERTROWTYPEEXPR", 18))
+ return_value = _readConvertRowtypeExpr();
+ else if (MATCH("COLLATE", 7))
+ return_value = _readCollateExpr();
+ else if (MATCH("CASE", 4))
+ return_value = _readCaseExpr();
+ else if (MATCH("WHEN", 4))
+ return_value = _readCaseWhen();
+ else if (MATCH("CASETESTEXPR", 12))
+ return_value = _readCaseTestExpr();
+ else if (MATCH("ARRAY", 5))
+ return_value = _readArrayExpr();
+ else if (MATCH("ROW", 3))
+ return_value = _readRowExpr();
+ else if (MATCH("ROWCOMPARE", 10))
+ return_value = _readRowCompareExpr();
+ else if (MATCH("COALESCE", 8))
+ return_value = _readCoalesceExpr();
+ else if (MATCH("MINMAX", 6))
+ return_value = _readMinMaxExpr();
+ else if (MATCH("XMLEXPR", 7))
+ return_value = _readXmlExpr();
+ else if (MATCH("NULLTEST", 8))
+ return_value = _readNullTest();
+ else if (MATCH("BOOLEANTEST", 11))
+ return_value = _readBooleanTest();
+ else if (MATCH("COERCETODOMAIN", 14))
+ return_value = _readCoerceToDomain();
+ else if (MATCH("COERCETODOMAINVALUE", 19))
+ return_value = _readCoerceToDomainValue();
+ else if (MATCH("SETTODEFAULT", 12))
+ return_value = _readSetToDefault();
+ else if (MATCH("CURRENTOFEXPR", 13))
+ return_value = _readCurrentOfExpr();
+ else if (MATCH("INFERENCEELEM", 13))
+ return_value = _readInferenceElem();
+ else if (MATCH("TARGETENTRY", 11))
+ return_value = _readTargetEntry();
+ else if (MATCH("RANGETBLREF", 11))
+ return_value = _readRangeTblRef();
+ else if (MATCH("JOINEXPR", 8))
+ return_value = _readJoinExpr();
+ else if (MATCH("FROMEXPR", 8))
+ return_value = _readFromExpr();
+ else if (MATCH("ONCONFLICTEXPR", 14))
+ return_value = _readOnConflictExpr();
+ else if (MATCH("RTE", 3))
+ return_value = _readRangeTblEntry();
+ else if (MATCH("RANGETBLFUNCTION", 16))
+ return_value = _readRangeTblFunction();
+ else if (MATCH("TABLESAMPLECLAUSE", 17))
+ return_value = _readTableSampleClause();
+ else if (MATCH("NOTIFY", 6))
+ return_value = _readNotifyStmt();
+ else if (MATCH("DEFELEM", 7))
+ return_value = _readDefElem();
+ else if (MATCH("DECLARECURSOR", 13))
+ return_value = _readDeclareCursorStmt();
+ else if (MATCH("PLANNEDSTMT", 11))
+ return_value = _readPlannedStmt();
+ else if (MATCH("PLAN", 4))
+ return_value = _readPlan();
+ else if (MATCH("RESULT", 6))
+ return_value = _readResult();
+ else if (MATCH("MODIFYTABLE", 11))
+ return_value = _readModifyTable();
+ else if (MATCH("APPEND", 6))
+ return_value = _readAppend();
+ else if (MATCH("MERGEAPPEND", 11))
+ return_value = _readMergeAppend();
+ else if (MATCH("RECURSIVEUNION", 14))
+ return_value = _readRecursiveUnion();
+ else if (MATCH("BITMAPAND", 9))
+ return_value = _readBitmapAnd();
+ else if (MATCH("BITMAPOR", 8))
+ return_value = _readBitmapOr();
+ else if (MATCH("SCAN", 4))
+ return_value = _readScan();
+ else if (MATCH("SEQSCAN", 7))
+ return_value = _readSeqScan();
+ else if (MATCH("SAMPLESCAN", 10))
+ return_value = _readSampleScan();
+ else if (MATCH("INDEXSCAN", 9))
+ return_value = _readIndexScan();
+ else if (MATCH("INDEXONLYSCAN", 13))
+ return_value = _readIndexOnlyScan();
+ else if (MATCH("BITMAPINDEXSCAN", 15))
+ return_value = _readBitmapIndexScan();
+ else if (MATCH("BITMAPHEAPSCAN", 14))
+ return_value = _readBitmapHeapScan();
+ else if (MATCH("TIDSCAN", 7))
+ return_value = _readTidScan();
+ else if (MATCH("SUBQUERYSCAN", 12))
+ return_value = _readSubqueryScan();
+ else if (MATCH("FUNCTIONSCAN", 12))
+ return_value = _readFunctionScan();
+ else if (MATCH("VALUESSCAN", 10))
+ return_value = _readValuesScan();
+ else if (MATCH("CTESCAN", 7))
+ return_value = _readCteScan();
+ else if (MATCH("WORKTABLESCAN", 13))
+ return_value = _readWorkTableScan();
+ else if (MATCH("FOREIGNSCAN", 11))
+ return_value = _readForeignScan();
+ else if (MATCH("CUSTOMSCAN", 10))
+ return_value = _readCustomScan();
+ else if (MATCH("JOIN", 4))
+ return_value = _readJoin();
+ else if (MATCH("NESTLOOP", 8))
+ return_value = _readNestLoop();
+ else if (MATCH("MERGEJOIN", 9))
+ return_value = _readMergeJoin();
+ else if (MATCH("HASHJOIN", 8))
+ return_value = _readHashJoin();
+ else if (MATCH("MATERIAL", 8))
+ return_value = _readMaterial();
+ else if (MATCH("SORT", 4))
+ return_value = _readSort();
+ else if (MATCH("GROUP", 5))
+ return_value = _readGroup();
+ else if (MATCH("AGG", 3))
+ return_value = _readAgg();
+ else if (MATCH("WINDOWAGG", 9))
+ return_value = _readWindowAgg();
+ else if (MATCH("UNIQUE", 6))
+ return_value = _readUnique();
+ else if (MATCH("GATHER", 6))
+ return_value = _readGather();
+ else if (MATCH("HASH", 4))
+ return_value = _readHash();
+ else if (MATCH("SETOP", 5))
+ return_value = _readSetOp();
+ else if (MATCH("LOCKROWS", 8))
+ return_value = _readLockRows();
+ else if (MATCH("LIMIT", 5))
+ return_value = _readLimit();
+ else if (MATCH("NESTLOOPPARAM", 13))
+ return_value = _readNestLoopParam();
+ else if (MATCH("PLANROWMARK", 11))
+ return_value = _readPlanRowMark();
+ else if (MATCH("PLANINVALITEM", 13))
+ return_value = _readPlanInvalItem();
+ else if (MATCH("SUBPLAN", 7))
+ return_value = _readSubPlan();
+ else if (MATCH("ALTERNATIVESUBPLAN", 18))
+ return_value = _readAlternativeSubPlan();
+ else if (MATCH("EXTENSIBLENODE", 14))
+ return_value = _readExtensibleNode();
+ else
+ {
+ elog(ERROR, "badly formatted node string \"%.32s\"...", token);
+ return_value = NULL; /* keep compiler quiet */
+ }
+
+ return (Node *) return_value;
+ }
+
+
+ /*
+ * readDatum
+ *
+ * Given a string representation of a constant, recreate the appropriate
+ * Datum. The string representation embeds length info, but not byValue,
+ * so we must be told that.
+ */
+ Datum
+ readDatum(bool typbyval)
+ {
+ Size length,
+ i;
+ int tokenLength;
+ char *token;
+ Datum res;
+ char *s;
+
+ /*
+ * read the actual length of the value
+ */
+ token = pg_strtok(&tokenLength);
+ length = atoui(token);
+
+ token = pg_strtok(&tokenLength); /* read the '[' */
+ if (token == NULL || token[0] != '[')
+ elog(ERROR, "expected \"[\" to start datum, but got \"%s\"; length = %zu",
+ token ? (const char *) token : "[NULL]", length);
+
+ if (typbyval)
+ {
+ if (length > (Size) sizeof(Datum))
+ elog(ERROR, "byval datum but length = %zu", length);
+ res = (Datum) 0;
+ s = (char *) (&res);
+ for (i = 0; i < (Size) sizeof(Datum); i++)
+ {
+ token = pg_strtok(&tokenLength);
+ s[i] = (char) atoi(token);
+ }
+ }
+ else if (length <= 0)
+ res = (Datum) NULL;
+ else
+ {
+ s = (char *) palloc(length);
+ for (i = 0; i < length; i++)
+ {
+ token = pg_strtok(&tokenLength);
+ s[i] = (char) atoi(token);
+ }
+ res = PointerGetDatum(s);
+ }
+
+ token = pg_strtok(&tokenLength); /* read the ']' */
+ if (token == NULL || token[0] != ']')
+ elog(ERROR, "expected \"]\" to end datum, but got \"%s\"; length = %zu",
+ token ? (const char *) token : "[NULL]", length);
+
+ return res;
+ }
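+
+ /*
+  * Illustrative example (not original source text): the serialized form
+  * is "<length> [ <byte> ... ]", and a by-value datum always carries
+  * sizeof(Datum) bytes.  On a little-endian 64-bit build, an int4 datum
+  * holding 1 would read back from
+  *
+  *     4 [ 1 0 0 0 0 0 0 0 ]
+  *
+  * while a pass-by-reference datum carries exactly <length> bytes, and a
+  * zero length yields a NULL pointer.
+  */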
+
++<<<<<<< HEAD
++#ifdef XCP
++/*
++ * scanDatum
++ *
++ * Recreate Datum from the text format understandable by the input function
++ * of the specified data type.
++ */
++static Datum
++scanDatum(Oid typid, int typmod)
++{
++ Oid typInput;
++ Oid typioparam;
++ FmgrInfo finfo;
++ FunctionCallInfoData fcinfo;
++ char *value;
++ Datum res;
++ READ_TEMP_LOCALS();
++
++ /* Get input function for the type */
++ getTypeInputInfo(typid, &typInput, &typioparam);
++ fmgr_info(typInput, &finfo);
++
++ /* Read the value */
++ token = pg_strtok(&length);
++ value = nullable_string(token, length);
++
++	/* The value cannot be NULL, so we actually received an empty string */
++ if (value == NULL)
++ value = "";
++
++ /* Invoke input function */
++ InitFunctionCallInfoData(fcinfo, &finfo, 3, InvalidOid, NULL, NULL);
++
++ fcinfo.arg[0] = CStringGetDatum(value);
++ fcinfo.arg[1] = ObjectIdGetDatum(typioparam);
++ fcinfo.arg[2] = Int32GetDatum(typmod);
++ fcinfo.argnull[0] = false;
++ fcinfo.argnull[1] = false;
++ fcinfo.argnull[2] = false;
++
++ res = FunctionCallInvoke(&fcinfo);
++
++ return res;
++}
++#endif
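++
++/*
++ * Illustrative note (an inference from the code above, not original
++ * source text): scanDatum is the portable-input counterpart of
++ * readDatum -- rather than raw bytes, it expects the value as the text
++ * produced by the type's output function and rebuilds the Datum through
++ * the type's input function, honoring the supplied typmod.
++ */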
++=======
+ /*
+ * readAttrNumberCols
+ */
+ AttrNumber *
+ readAttrNumberCols(int numCols)
+ {
+ int tokenLength,
+ i;
+ char *token;
+ AttrNumber *attr_vals;
+
+ if (numCols <= 0)
+ return NULL;
+
+ attr_vals = (AttrNumber *) palloc(numCols * sizeof(AttrNumber));
+ for (i = 0; i < numCols; i++)
+ {
+ token = pg_strtok(&tokenLength);
+ attr_vals[i] = atoi(token);
+ }
+
+ return attr_vals;
+ }
+
+ /*
+ * readOidCols
+ */
+ Oid *
+ readOidCols(int numCols)
+ {
+ int tokenLength,
+ i;
+ char *token;
+ Oid *oid_vals;
+
+ if (numCols <= 0)
+ return NULL;
+
+ oid_vals = (Oid *) palloc(numCols * sizeof(Oid));
+ for (i = 0; i < numCols; i++)
+ {
+ token = pg_strtok(&tokenLength);
+ oid_vals[i] = atooid(token);
+ }
+
+ return oid_vals;
+ }
+
+ /*
+ * readIntCols
+ */
+ int *
+ readIntCols(int numCols)
+ {
+ int tokenLength,
+ i;
+ char *token;
+ int *int_vals;
+
+ if (numCols <= 0)
+ return NULL;
+
+ int_vals = (int *) palloc(numCols * sizeof(int));
+ for (i = 0; i < numCols; i++)
+ {
+ token = pg_strtok(&tokenLength);
+ int_vals[i] = atoi(token);
+ }
+
+ return int_vals;
+ }
+
+ /*
+ * readBoolCols
+ */
+ bool *
+ readBoolCols(int numCols)
+ {
+ int tokenLength,
+ i;
+ char *token;
+ bool *bool_vals;
+
+ if (numCols <= 0)
+ return NULL;
+
+ bool_vals = (bool *) palloc(numCols * sizeof(bool));
+ for (i = 0; i < numCols; i++)
+ {
+ token = pg_strtok(&tokenLength);
+ bool_vals[i] = strtobool(token);
+ }
+
+ return bool_vals;
+ }
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
* allpaths.c
* Routines to find possible search paths for processing a query
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include "postgres.h"
+ #include <limits.h>
#include <math.h>
+#include "catalog/pg_namespace.h"
#include "access/sysattr.h"
#include "access/tsmapi.h"
#include "catalog/pg_class.h"
set_baserel_size_estimates(root, rel);
}
+
+ /*
+ * If this relation could possibly be scanned from within a worker, then set
+ * its consider_parallel flag.
+ */
+ static void
+ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte)
+ {
+ /*
+ * The flag has previously been initialized to false, so we can just
+ * return if it becomes clear that we can't safely set it.
+ */
+ Assert(!rel->consider_parallel);
+
+ /* Don't call this if parallelism is disallowed for the entire query. */
+ Assert(root->glob->parallelModeOK);
+
+ /* This should only be called for baserels and appendrel children. */
+ Assert(rel->reloptkind == RELOPT_BASEREL ||
+ rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
+
+ /* Assorted checks based on rtekind. */
+ switch (rte->rtekind)
+ {
+ case RTE_RELATION:
+
+ /*
+ * Currently, parallel workers can't access the leader's temporary
+			 * tables. We could possibly relax this if we wrote all of its
+ * local buffers at the start of the query and made no changes
+ * thereafter (maybe we could allow hint bit changes), and if we
+ * taught the workers to read them. Writing a large number of
+ * temporary buffers could be expensive, though, and we don't have
+ * the rest of the necessary infrastructure right now anyway. So
+ * for now, bail out if we see a temporary table.
+ */
+ if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
+ return;
+
+ /*
+ * Table sampling can be pushed down to workers if the sample
+ * function and its arguments are safe.
+ */
+ if (rte->tablesample != NULL)
+ {
+				char		proparallel = func_parallel(rte->tablesample->tsmhandler);
+
+ if (proparallel != PROPARALLEL_SAFE)
+ return;
+ if (has_parallel_hazard((Node *) rte->tablesample->args,
+ false))
+ return;
+ }
+
+ /*
+ * Ask FDWs whether they can support performing a ForeignScan
+ * within a worker. Most often, the answer will be no. For
+ * example, if the nature of the FDW is such that it opens a TCP
+ * connection with a remote server, each parallel worker would end
+ * up with a separate connection, and these connections might not
+ * be appropriately coordinated between workers and the leader.
+ */
+ if (rte->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ Assert(rel->fdwroutine);
+ if (!rel->fdwroutine->IsForeignScanParallelSafe)
+ return;
+ if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
+ return;
+ }
+
+ /*
+ * There are additional considerations for appendrels, which we'll
+ * deal with in set_append_rel_size and set_append_rel_pathlist.
+ * For now, just set consider_parallel based on the rel's own
+ * quals and targetlist.
+ */
+ break;
+
+ case RTE_SUBQUERY:
+
+ /*
+ * There's no intrinsic problem with scanning a subquery-in-FROM
+ * (as distinct from a SubPlan or InitPlan) in a parallel worker.
+ * If the subquery doesn't happen to have any parallel-safe paths,
+ * then flagging it as consider_parallel won't change anything,
+ * but that's true for plain tables, too. We must set
+ * consider_parallel based on the rel's own quals and targetlist,
+ * so that if a subquery path is parallel-safe but the quals and
+ * projection we're sticking onto it are not, we correctly mark
+ * the SubqueryScanPath as not parallel-safe. (Note that
+ * set_subquery_pathlist() might push some of these quals down
+ * into the subquery itself, but that doesn't change anything.)
+ */
+ break;
+
+ case RTE_JOIN:
+ /* Shouldn't happen; we're only considering baserels here. */
+ Assert(false);
+ return;
+
+ case RTE_FUNCTION:
+ /* Check for parallel-restricted functions. */
+ if (!function_rte_parallel_ok(rte))
+ return;
+ break;
+
+ case RTE_VALUES:
+
+ /*
+ * The data for a VALUES clause is stored in the plan tree itself,
+ * so scanning it in a worker is fine.
+ */
+ break;
+
+ case RTE_CTE:
+
+ /*
+ * CTE tuplestores aren't shared among parallel workers, so we
+ * force all CTE scans to happen in the leader. Also, populating
+ * the CTE would require executing a subplan that's not available
+ * in the worker, might be parallel-restricted, and must get
+ * executed only once.
+ */
+ return;
+ }
+
+ /*
+ * If there's anything in baserestrictinfo that's parallel-restricted, we
+ * give up on parallelizing access to this relation. We could consider
+ * instead postponing application of the restricted quals until we're
+ * above all the parallelism in the plan tree, but it's not clear that
+ * that would be a win in very many cases, and it might be tricky to make
+ * outer join clauses work correctly. It would likely break equivalence
+ * classes, too.
+ */
+ if (has_parallel_hazard((Node *) rel->baserestrictinfo, false))
+ return;
+
+ /*
+ * Likewise, if the relation's outputs are not parallel-safe, give up.
+ * (Usually, they're just Vars, but sometimes they're not.)
+ */
+ if (has_parallel_hazard((Node *) rel->reltarget->exprs, false))
+ return;
+
+ /* We have a winner. */
+ rel->consider_parallel = true;
+ }
+
+ /*
+ * Check whether a function RTE is scanning something parallel-restricted.
+ */
+ static bool
+ function_rte_parallel_ok(RangeTblEntry *rte)
+ {
+ ListCell *lc;
+
+ foreach(lc, rte->functions)
+ {
+ RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
+
+ Assert(IsA(rtfunc, RangeTblFunction));
+ if (has_parallel_hazard(rtfunc->funcexpr, false))
+ return false;
+ }
+
+ return true;
+ }
+
/*
* set_plain_rel_pathlist
* Build access paths for a plain relation (no subquery, no inheritance)
Relids required_outer;
pushdown_safety_info safetyInfo;
double tuple_fraction;
- PlannerInfo *subroot;
- List *pathkeys;
+#ifdef XCP
+ Distribution *distribution;
+#endif
+ RelOptInfo *sub_final_rel;
+ ListCell *lc;
/*
* Must copy the Query so that planning doesn't mess up the RTE contents
return;
}
- /* Mark rel with estimated output rows, width, etc */
+ /*
+ * Mark rel with estimated output rows, width, etc. Note that we have to
+ * do this before generating outer-query paths, else cost_subqueryscan is
+ * not happy.
+ */
set_subquery_size_estimates(root, rel);
- /* Convert subquery pathkeys to outer representation */
- pathkeys = convert_subquery_pathkeys(root, rel, subroot->query_pathkeys);
-
+ /* Generate appropriate path */
+#ifdef XCP
+ if (subroot->distribution && subroot->distribution->distributionExpr)
+ {
+ ListCell *lc;
+ /*
+		 * The distribution expression refers to the subplan's tlist, but it
+		 * should refer to the rel, so it needs conversion.
+ */
+ distribution = makeNode(Distribution);
+ distribution->distributionType = subroot->distribution->distributionType;
+ distribution->nodes = bms_copy(subroot->distribution->nodes);
+ distribution->restrictNodes = bms_copy(subroot->distribution->restrictNodes);
+ foreach(lc, rel->subplan->targetlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+ if (equal(tle->expr, subroot->distribution->distributionExpr))
+ {
+ distribution->distributionExpr = (Node *)
+ makeVarFromTargetEntry(rel->relid, tle);
+ break;
+ }
+ }
+ }
+ else
+ distribution = subroot->distribution;
+ add_path(rel, create_subqueryscan_path(root, rel, pathkeys, required_outer,
+ distribution));
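+
+	/*
+	 * Worked example (illustrative only): if the subquery is distributed by
+	 * hash on an expression that appears as resno 2 of its targetlist, the
+	 * copied Distribution keeps the same type and node sets, but its
+	 * distributionExpr becomes a Var with varno = rel->relid and
+	 * varattno = 2, so the outer query sees the distribution in terms of
+	 * its own range table.
+	 */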
+
+ /*
+	 * Temporarily block ORDER BY in subqueries until we can support it in
+	 * Postgres-XL without producing incorrect results. We should do this
+	 * only in normal processing mode, though!
+ *
+ * The extra conditions below try to handle cases where an ORDER BY
+ * appears in a simple VIEW or INSERT SELECT.
+ */
+ if (IsUnderPostmaster &&
+ list_length(subquery->sortClause) > 1
+ && (subroot->parent_root != root
+ || (subroot->parent_root == root
+ && (root->parse->commandType != CMD_SELECT
+ || (root->parse->commandType == CMD_SELECT
+ && root->parse->hasWindowFuncs)))))
+ elog(ERROR, "Postgres-XL does not currently support ORDER BY in subqueries");
+#else
- add_path(rel, create_subqueryscan_path(root, rel, pathkeys, required_outer));
- #endif
+
+ /*
+ * For each Path that subquery_planner produced, make a SubqueryScanPath
+ * in the outer query.
+ */
+ foreach(lc, sub_final_rel->pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+ List *pathkeys;
+
+ /* Convert subpath's pathkeys to outer representation */
+ pathkeys = convert_subquery_pathkeys(root,
+ rel,
+ subpath->pathkeys,
+ make_tlist_from_pathtarget(subpath->pathtarget));
+
+ /* Generate outer path using this subpath */
+ add_path(rel, (Path *)
+ create_subqueryscan_path(root, rel, subpath,
+ pathkeys, required_outer));
+ }
++#endif
}
/*
* values.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
+#ifdef XCP
+double network_byte_cost = DEFAULT_NETWORK_BYTE_COST;
+double remote_query_cost = DEFAULT_REMOTE_QUERY_COST;
+#endif
+ double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
+ double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
* off.
*/
else if (enable_material && innersortkeys != NIL &&
- relation_byte_size(inner_path_rows, inner_path->parent->width) >
+ relation_byte_size(inner_path_rows,
+ inner_path->pathtarget->width) >
(work_mem * 1024L))
path->materialize_inner = true;
+#ifdef XCP
+ /*
+	 * Even if innersortkeys are specified, we never add a Sort node on top
+	 * of a RemoteSubplan; instead we set up an internal sorter. And since
+	 * RemoteSubplan does not support mark/restore, we must materialize it.
+ */
+ else if (inner_path->pathtype == T_RemoteSubplan)
+ path->materialize_inner = true;
+#endif
else
path->materialize_inner = false;
* Planning is complete, we just need to convert the selected
* Path into a Plan.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path);
static Plan *create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path);
static Result *create_result_plan(PlannerInfo *root, ResultPath *best_path);
- static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path);
- static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path);
+#ifdef XCP
+static void adjust_subplan_distribution(PlannerInfo *root, Distribution *pathd,
+ Distribution *subd);
+static RemoteSubplan *create_remotescan_plan(PlannerInfo *root,
+ RemoteSubPath *best_path);
+static char *get_internal_cursor(void);
+#endif
+ static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
+ int flags);
+ static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
+ int flags);
+ static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
+ static Plan *create_projection_plan(PlannerInfo *root, ProjectionPath *best_path);
+ static Plan *inject_projection_plan(Plan *subplan, List *tlist);
+ static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
+ static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
+ static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path,
+ int flags);
+ static Agg *create_agg_plan(PlannerInfo *root, AggPath *best_path);
+ static Plan *create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path);
+ static Result *create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path);
+ static WindowAgg *create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path);
+ static SetOp *create_setop_plan(PlannerInfo *root, SetOpPath *best_path,
+ int flags);
+ static RecursiveUnion *create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path);
+ static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc,
+ List *tlist,
+ int numSortCols, AttrNumber *sortColIdx,
+ int *partNumCols,
+ AttrNumber **partColIdx,
+ Oid **partOperators,
+ int *ordNumCols,
+ AttrNumber **ordColIdx,
+ Oid **ordOperators);
+ static LockRows *create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
+ int flags);
+ static ModifyTable *create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path);
+ static Limit *create_limit_plan(PlannerInfo *root, LimitPath *best_path,
+ int flags);
static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
static EquivalenceMember *find_ec_member_for_tle(EquivalenceClass *ec,
TargetEntry *tle,
Relids relids);
+ static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys);
+ static Sort *make_sort_from_groupcols(List *groupcls,
+ AttrNumber *grpColIdx,
+ Plan *lefttree);
static Material *make_material(Plan *lefttree);
+ static WindowAgg *make_windowagg(List *tlist, Index winref,
+ int partNumCols, AttrNumber *partColIdx, Oid *partOperators,
+ int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators,
+ int frameOptions, Node *startOffset, Node *endOffset,
+ Plan *lefttree);
+ static Group *make_group(List *tlist, List *qual, int numGroupCols,
+ AttrNumber *grpColIdx, Oid *grpOperators,
+ Plan *lefttree);
+ static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList);
+ static Unique *make_unique_from_pathkeys(Plan *lefttree,
+ List *pathkeys, int numCols);
+ static Gather *make_gather(List *qptlist, List *qpqual,
+ int nworkers, bool single_copy, Plan *subplan);
+ static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
+ List *distinctList, AttrNumber flagColIdx, int firstFlag,
+ long numGroups);
+ static LockRows *make_lockrows(Plan *lefttree, List *rowMarks, int epqParam);
+ static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
+ static ModifyTable *make_modifytable(PlannerInfo *root,
+ CmdType operation, bool canSetTag,
+ Index nominalRelation,
+ List *resultRelations, List *subplans,
+ List *withCheckOptionLists, List *returningLists,
+ List *rowMarks, OnConflictExpr *onconflict, int epqParam);
+#ifdef XCP
+static int add_sort_column(AttrNumber colIdx, Oid sortOp, Oid coll,
+ bool nulls_first,int numCols, AttrNumber *sortColIdx,
+ Oid *sortOperators, Oid *collations, bool *nullsFirst);
+#endif
/*
* create_plan
/* Initialize this module's private workspace in PlannerInfo */
root->curOuterRels = NULL;
root->curOuterParams = NIL;
+#ifdef XCP
+ root->curOuterRestrict = NULL;
+ adjust_subplan_distribution(root, root->distribution,
+ best_path->distribution);
+#endif
- /* Recursively process the path tree */
- plan = create_plan_recurse(root, best_path);
+ /* Recursively process the path tree, demanding the correct tlist result */
+ plan = create_plan_recurse(root, best_path, CP_EXACT_TLIST);
+
+ /*
+ * Make sure the topmost plan node's targetlist exposes the original
+ * column names and other decorative info. Targetlists generated within
+ * the planner don't bother with that stuff, but we must have it on the
+ * top-level tlist seen at execution time. However, ModifyTable plan
+ * nodes don't have a tlist matching the querytree targetlist.
+ */
+ if (!IsA(plan, ModifyTable))
+ apply_tlist_labeling(plan->targetlist, root->processed_tlist);
+
+ /*
+ * Attach any initPlans created in this query level to the topmost plan
+ * node. (In principle the initplans could go in any plan node at or
+ * above where they're referenced, but there seems no reason to put them
+ * any lower than the topmost node for the query level. Also, see
+ * comments for SS_finalize_plan before you try to change this.)
+ */
+ SS_attach_initplans(root, plan);
/* Check we successfully assigned all NestLoopParams to plan nodes */
if (root->curOuterParams != NIL)
case T_WorkTableScan:
case T_ForeignScan:
case T_CustomScan:
- plan = create_scan_plan(root, best_path);
+ plan = create_scan_plan(root, best_path, flags);
break;
+#ifdef XCP
+ case T_RemoteSubplan:
+ plan = (Plan *) create_remotescan_plan(root,
+ (RemoteSubPath *) best_path);
+ break;
+#endif
case T_HashJoin:
case T_MergeJoin:
case T_NestLoop:
tle = tlist_member(uniqexpr, newtlist);
if (!tle)
{
- tle = makeTargetEntry((Expr *) uniqexpr,
- nextresno,
- NULL,
- false);
- newtlist = lappend(newtlist, tle);
- nextresno++;
- newitems = true;
+ tle = makeTargetEntry((Expr *) uniqexpr,
+ nextresno,
+ NULL,
+ false);
+ newtlist = lappend(newtlist, tle);
+ nextresno++;
+ newitems = true;
+ }
+ }
+
+ if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
+ {
+ /*
+ * If the top plan node can't do projections and its existing target
+ * list isn't already what we need, we need to add a Result node to
+ * help it along.
+ */
+ if (!is_projection_capable_plan(subplan) &&
+ !tlist_same_exprs(newtlist, subplan->targetlist))
+ subplan = inject_projection_plan(subplan, newtlist);
+ else
+ subplan->targetlist = newtlist;
++#ifdef XCP
++ /*
++ * RemoteSubplan is conditionally projection capable: it pushes the
++ * projection down to the data nodes.
++ */
++ if (IsA(subplan, RemoteSubplan))
++ subplan->lefttree->targetlist = newtlist;
++#endif
+ }
+
+ /*
+ * Build control information showing which subplan output columns are to
+ * be examined by the grouping step. Unfortunately we can't merge this
+ * with the previous loop, since we didn't then know which version of the
+ * subplan tlist we'd end up using.
+ */
+ newtlist = subplan->targetlist;
+ numGroupCols = list_length(uniq_exprs);
+ groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
+
+ groupColPos = 0;
+ foreach(l, uniq_exprs)
+ {
+ Node *uniqexpr = lfirst(l);
+ TargetEntry *tle;
+
+ tle = tlist_member(uniqexpr, newtlist);
+ if (!tle) /* shouldn't happen */
+ elog(ERROR, "failed to find unique expression in subplan tlist");
+ groupColIdx[groupColPos++] = tle->resno;
+ }
+
+ if (best_path->umethod == UNIQUE_PATH_HASH)
+ {
+ Oid *groupOperators;
+
+ /*
+ * Get the hashable equality operators for the Agg node to use.
+ * Normally these are the same as the IN clause operators, but if
+ * those are cross-type operators then the equality operators are the
+ * ones for the IN clause operators' RHS datatype.
+ */
+ groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
+ groupColPos = 0;
+ foreach(l, in_operators)
+ {
+ Oid in_oper = lfirst_oid(l);
+ Oid eq_oper;
+
+ if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
+ elog(ERROR, "could not find compatible hash operator for operator %u",
+ in_oper);
+ groupOperators[groupColPos++] = eq_oper;
+ }
+
+ /*
+ * Since the Agg node is going to project anyway, we can give it the
+ * minimum output tlist, without any stuff we might have added to the
+ * subplan tlist.
+ */
+ plan = (Plan *) make_agg(build_path_tlist(root, &best_path->path),
+ NIL,
+ AGG_HASHED,
+ AGGSPLIT_SIMPLE,
+ numGroupCols,
+ groupColIdx,
+ groupOperators,
+ NIL,
+ NIL,
+ best_path->path.rows,
+ subplan);
+ }
+ else
+ {
+ List *sortList = NIL;
+ Sort *sort;
+
+ /* Create an ORDER BY list to sort the input compatibly */
+ groupColPos = 0;
+ foreach(l, in_operators)
+ {
+ Oid in_oper = lfirst_oid(l);
+ Oid sortop;
+ Oid eqop;
+ TargetEntry *tle;
+ SortGroupClause *sortcl;
+
+ sortop = get_ordering_op_for_equality_op(in_oper, false);
+ if (!OidIsValid(sortop)) /* shouldn't happen */
+ elog(ERROR, "could not find ordering operator for equality operator %u",
+ in_oper);
+
+ /*
+ * The Unique node will need equality operators. Normally these
+ * are the same as the IN clause operators, but if those are
+ * cross-type operators then the equality operators are the ones
+ * for the IN clause operators' RHS datatype.
+ */
+ eqop = get_equality_op_for_ordering_op(sortop, NULL);
+ if (!OidIsValid(eqop)) /* shouldn't happen */
+ elog(ERROR, "could not find equality operator for ordering operator %u",
+ sortop);
+
+ tle = get_tle_by_resno(subplan->targetlist,
+ groupColIdx[groupColPos]);
+ Assert(tle != NULL);
+
+ sortcl = makeNode(SortGroupClause);
+ sortcl->tleSortGroupRef = assignSortGroupRef(tle,
+ subplan->targetlist);
+ sortcl->eqop = eqop;
+ sortcl->sortop = sortop;
+ sortcl->nulls_first = false;
+ sortcl->hashable = false; /* no need to make this accurate */
+ sortList = lappend(sortList, sortcl);
+ groupColPos++;
+ }
+ sort = make_sort_from_sortclauses(sortList, subplan);
+ label_sort_with_costsize(root, sort, -1.0);
+ plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList);
+ }
+
+ /* Copy cost data from Path to Plan */
+ copy_generic_path_info(plan, &best_path->path);
+
+ return plan;
+ }
+
+ /*
+ * create_gather_plan
+ *
+ * Create a Gather plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static Gather *
+ create_gather_plan(PlannerInfo *root, GatherPath *best_path)
+ {
+ Gather *gather_plan;
+ Plan *subplan;
+ List *tlist;
+
+ /*
+ * Although the Gather node can project, we prefer to push down such work
+ * to its child node, so demand an exact tlist from the child.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ gather_plan = make_gather(tlist,
+ NIL,
+ best_path->path.parallel_workers,
+ best_path->single_copy,
+ subplan);
+
+ copy_generic_path_info(&gather_plan->plan, &best_path->path);
+
+ /* use parallel mode for parallel plans. */
+ root->glob->parallelModeNeeded = true;
+
+ return gather_plan;
+ }
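+
+ /*
+ * Note on single_copy above: when it is set, exactly one worker runs the
+ * subplan and the leader normally does not execute it itself, so Gather
+ * acts as a plain relay; this is the shape force_parallel_mode uses to
+ * wrap a whole plan for testing.
+ */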
+
+ /*
+ * create_projection_plan
+ *
+ * Create a plan tree to do a projection step and (recursively) plans
+ * for its subpaths. We may need a Result node for the projection,
+ * but sometimes we can just let the subplan do the work.
+ */
+ static Plan *
+ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path)
+ {
+ Plan *plan;
+ Plan *subplan;
+ List *tlist;
+
+ /* Since we intend to project, we don't need to constrain child tlist */
+ subplan = create_plan_recurse(root, best_path->subpath, 0);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ /*
+ * We might not really need a Result node here, either because the subplan
+ * can project or because it's returning the right list of expressions
+ * anyway. Usually create_projection_path will have detected that and set
+ * dummypp if we don't need a Result; but its decision can't be final,
+ * because some createplan.c routines change the tlists of their nodes.
+ * (An example is that create_merge_append_plan might add resjunk sort
+ * columns to a MergeAppend.) So we have to recheck here. If we do
+ * arrive at a different answer than create_projection_path did, we'll
+ * have made slightly wrong cost estimates; but label the plan with the
+ * cost estimates we actually used, not "corrected" ones. (XXX this could
+ * be cleaned up if we moved more of the sortcolumn setup logic into Path
+ * creation, but that would add expense to creating Paths we might end up
+ * not using.)
+ */
+ if (is_projection_capable_path(best_path->subpath) ||
+ tlist_same_exprs(tlist, subplan->targetlist))
+ {
+ /* Don't need a separate Result, just assign tlist to subplan */
+ plan = subplan;
+ plan->targetlist = tlist;
+
+ /* Label plan with the estimated costs we actually used */
+ plan->startup_cost = best_path->path.startup_cost;
+ plan->total_cost = best_path->path.total_cost;
+ plan->plan_rows = best_path->path.rows;
+ plan->plan_width = best_path->path.pathtarget->width;
+ /* ... but be careful not to munge subplan's parallel-aware flag */
+ }
+ else
+ {
+ /* We need a Result node */
+ plan = (Plan *) make_result(tlist, NULL, subplan);
+
+ copy_generic_path_info(plan, (Path *) best_path);
+ }
+
+ return plan;
+ }
+
+ /*
+ * inject_projection_plan
+ * Insert a Result node to do a projection step.
+ *
+ * This is used in a few places where we decide on-the-fly that we need a
+ * projection step as part of the tree generated for some Path node.
+ * We should try to get rid of this in favor of doing it more honestly.
+ */
+ static Plan *
+ inject_projection_plan(Plan *subplan, List *tlist)
+ {
+ Plan *plan;
+
+ plan = (Plan *) make_result(tlist, NULL, subplan);
+
+ /*
+ * In principle, we should charge tlist eval cost plus cpu_per_tuple per
+ * row for the Result node. But the former has probably been factored in
+ * already and the latter was not accounted for during Path construction,
+ * so being formally correct might just make the EXPLAIN output look less
+ * consistent not more so. Hence, just copy the subplan's cost.
+ */
+ copy_plan_costsize(plan, subplan);
+
+ return plan;
+ }
+
+ /*
+ * create_sort_plan
+ *
+ * Create a Sort plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static Sort *
+ create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
+ {
+ Sort *plan;
+ Plan *subplan;
+
+ /*
+ * We don't want any excess columns in the sorted tuples, so request a
+ * smaller tlist. Otherwise, since Sort doesn't project, tlist
+ * requirements pass through.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath,
+ flags | CP_SMALL_TLIST);
+
+ plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+ }
+
+ /*
+ * create_group_plan
+ *
+ * Create a Group plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static Group *
+ create_group_plan(PlannerInfo *root, GroupPath *best_path)
+ {
+ Group *plan;
+ Plan *subplan;
+ List *tlist;
+ List *quals;
+
+ /*
+ * Group can project, so no need to be terribly picky about child tlist,
+ * but we do need grouping columns to be available
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ quals = order_qual_clauses(root, best_path->qual);
+
+ plan = make_group(tlist,
+ quals,
+ list_length(best_path->groupClause),
+ extract_grouping_cols(best_path->groupClause,
+ subplan->targetlist),
+ extract_grouping_ops(best_path->groupClause),
+ subplan);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+ }
+
+ /*
+ * create_upper_unique_plan
+ *
+ * Create a Unique plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static Unique *
+ create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, int flags)
+ {
+ Unique *plan;
+ Plan *subplan;
+
+ /*
+ * Unique doesn't project, so tlist requirements pass through; moreover we
+ * need grouping columns to be labeled.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath,
+ flags | CP_LABEL_TLIST);
+
+ plan = make_unique_from_pathkeys(subplan,
+ best_path->path.pathkeys,
+ best_path->numkeys);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+ }
+
+ /*
+ * create_agg_plan
+ *
+ * Create an Agg plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static Agg *
+ create_agg_plan(PlannerInfo *root, AggPath *best_path)
+ {
+ Agg *plan;
+ Plan *subplan;
+ List *tlist;
+ List *quals;
+
+ /*
+ * Agg can project, so no need to be terribly picky about child tlist, but
+ * we do need grouping columns to be available
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ quals = order_qual_clauses(root, best_path->qual);
+
+ plan = make_agg(tlist, quals,
+ best_path->aggstrategy,
+ best_path->aggsplit,
+ list_length(best_path->groupClause),
+ extract_grouping_cols(best_path->groupClause,
+ subplan->targetlist),
+ extract_grouping_ops(best_path->groupClause),
+ NIL,
+ NIL,
+ best_path->numGroups,
+ subplan);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+ }
+
+ /*
+ * Given a groupclause for a collection of grouping sets, produce the
+ * corresponding groupColIdx.
+ *
+ * root->grouping_map maps the tleSortGroupRef to the actual column position in
+ * the input tuple. So we get the ref from the entries in the groupclause and
+ * look them up there.
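+ *
+ * For example, with groupClause refs (3, 1) and grouping_map[3] = 5 and
+ * grouping_map[1] = 2, the returned array is {5, 2}.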
+ */
+ static AttrNumber *
+ remap_groupColIdx(PlannerInfo *root, List *groupClause)
+ {
+ AttrNumber *grouping_map = root->grouping_map;
+ AttrNumber *new_grpColIdx;
+ ListCell *lc;
+ int i;
+
+ Assert(grouping_map);
+
+ new_grpColIdx = palloc0(sizeof(AttrNumber) * list_length(groupClause));
+
+ i = 0;
+ foreach(lc, groupClause)
+ {
+ SortGroupClause *clause = lfirst(lc);
+
+ new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
+ }
+
+ return new_grpColIdx;
+ }
+
+ /*
+ * create_groupingsets_plan
+ * Create a plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ *
+ * What we emit is an Agg plan with some vestigial Agg and Sort nodes
+ * hanging off the side. The top Agg implements the last grouping set
+ * specified in the GroupingSetsPath, and any additional grouping sets
+ * each give rise to a subsidiary Agg and Sort node in the top Agg's
+ * "chain" list. These nodes don't participate in the plan directly,
+ * but they are a convenient way to represent the required data for
+ * the extra steps.
+ *
+ * Returns a Plan node.
+ */
+ static Plan *
+ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
+ {
+ Agg *plan;
+ Plan *subplan;
+ List *rollup_groupclauses = best_path->rollup_groupclauses;
+ List *rollup_lists = best_path->rollup_lists;
+ AttrNumber *grouping_map;
+ int maxref;
+ List *chain;
+ ListCell *lc,
+ *lc2;
+
+ /* Shouldn't get here without grouping sets */
+ Assert(root->parse->groupingSets);
+ Assert(rollup_lists != NIL);
+ Assert(list_length(rollup_lists) == list_length(rollup_groupclauses));
+
+ /*
+ * Agg can project, so no need to be terribly picky about child tlist, but
+ * we do need grouping columns to be available
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+
+ /*
+ * Compute the mapping from tleSortGroupRef to column index in the child's
+ * tlist. First, identify max SortGroupRef in groupClause, for array
+ * sizing.
+ */
+ maxref = 0;
+ foreach(lc, root->parse->groupClause)
+ {
+ SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
+
+ if (gc->tleSortGroupRef > maxref)
+ maxref = gc->tleSortGroupRef;
+ }
+
+ grouping_map = (AttrNumber *) palloc0((maxref + 1) * sizeof(AttrNumber));
+
+ /* Now look up the column numbers in the child's tlist */
+ foreach(lc, root->parse->groupClause)
+ {
+ SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
+ TargetEntry *tle = get_sortgroupclause_tle(gc, subplan->targetlist);
+
+ grouping_map[gc->tleSortGroupRef] = tle->resno;
+ }
+
+ /*
+ * During setrefs.c, we'll need the grouping_map to fix up the cols lists
+ * in GroupingFunc nodes. Save it for setrefs.c to use.
+ *
+ * This doesn't work if we're in an inheritance subtree (see notes in
+ * create_modifytable_plan). Fortunately we can't be because there would
+ * never be grouping in an UPDATE/DELETE; but let's Assert that.
+ */
+ Assert(!root->hasInheritedTarget);
+ Assert(root->grouping_map == NULL);
+ root->grouping_map = grouping_map;
+
+ /*
+ * Generate the side nodes that describe the other sort and group
+ * operations besides the top one. Note that we don't worry about putting
+ * accurate cost estimates in the side nodes; only the topmost Agg node's
+ * costs will be shown by EXPLAIN.
+ */
+ chain = NIL;
+ if (list_length(rollup_groupclauses) > 1)
+ {
+ forboth(lc, rollup_groupclauses, lc2, rollup_lists)
+ {
+ List *groupClause = (List *) lfirst(lc);
+ List *gsets = (List *) lfirst(lc2);
+ AttrNumber *new_grpColIdx;
+ Plan *sort_plan;
+ Plan *agg_plan;
+
+ /* We want to iterate over all but the last rollup list elements */
+ if (lnext(lc) == NULL)
+ break;
+
+ new_grpColIdx = remap_groupColIdx(root, groupClause);
+
+ sort_plan = (Plan *)
+ make_sort_from_groupcols(groupClause,
+ new_grpColIdx,
+ subplan);
+
+ agg_plan = (Plan *) make_agg(NIL,
+ NIL,
+ AGG_SORTED,
+ AGGSPLIT_SIMPLE,
+ list_length((List *) linitial(gsets)),
+ new_grpColIdx,
+ extract_grouping_ops(groupClause),
+ gsets,
+ NIL,
+ 0, /* numGroups not needed */
+ sort_plan);
+
+ /*
+ * Nuke stuff we don't need to avoid bloating debug output.
+ */
+ sort_plan->targetlist = NIL;
+ sort_plan->lefttree = NULL;
+
+ chain = lappend(chain, agg_plan);
+ }
+ }
+
+ /*
+ * Now make the final Agg node
+ */
+ {
+ List *groupClause = (List *) llast(rollup_groupclauses);
+ List *gsets = (List *) llast(rollup_lists);
+ AttrNumber *top_grpColIdx;
+ int numGroupCols;
+
+ top_grpColIdx = remap_groupColIdx(root, groupClause);
+
+ numGroupCols = list_length((List *) linitial(gsets));
+
+ plan = make_agg(build_path_tlist(root, &best_path->path),
+ best_path->qual,
+ (numGroupCols > 0) ? AGG_SORTED : AGG_PLAIN,
+ AGGSPLIT_SIMPLE,
+ numGroupCols,
+ top_grpColIdx,
+ extract_grouping_ops(groupClause),
+ gsets,
+ chain,
+ 0, /* numGroups not needed */
+ subplan);
+
+ /* Copy cost data from Path to Plan */
+ copy_generic_path_info(&plan->plan, &best_path->path);
+ }
+
+ return (Plan *) plan;
+ }
+
+ /*
+ * create_minmaxagg_plan
+ *
+ * Create a Result plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static Result *
+ create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path)
+ {
+ Result *plan;
+ List *tlist;
+ ListCell *lc;
+
+ /* Prepare an InitPlan for each aggregate's subquery. */
+ foreach(lc, best_path->mmaggregates)
+ {
+ MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
+ PlannerInfo *subroot = mminfo->subroot;
+ Query *subparse = subroot->parse;
+ Plan *plan;
+
+ /*
+ * Generate the plan for the subquery. We already have a Path, but we
+ * have to convert it to a Plan and attach a LIMIT node above it.
+ * Since we are entering a different planner context (subroot),
+ * recurse to create_plan not create_plan_recurse.
+ */
+ plan = create_plan(subroot, mminfo->path);
+
+ plan = (Plan *) make_limit(plan,
+ subparse->limitOffset,
+ subparse->limitCount);
+
+ /* Must apply correct cost/width data to Limit node */
+ plan->startup_cost = mminfo->path->startup_cost;
+ plan->total_cost = mminfo->pathcost;
+ plan->plan_rows = 1;
+ plan->plan_width = mminfo->path->pathtarget->width;
+ plan->parallel_aware = false;
+
+ /* Convert the plan into an InitPlan in the outer query. */
+ SS_make_initplan_from_plan(root, subroot, plan, mminfo->param);
+ }
+
+ /* Generate the output plan --- basically just a Result */
+ tlist = build_path_tlist(root, &best_path->path);
+
+ plan = make_result(tlist, (Node *) best_path->quals, NULL);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ /*
+ * During setrefs.c, we'll need to replace references to the Agg nodes
+ * with InitPlan output params. (We can't just do that locally in the
+ * MinMaxAgg node, because path nodes above here may have Agg references
+ * as well.) Save the mmaggregates list to tell setrefs.c to do that.
+ *
+ * This doesn't work if we're in an inheritance subtree (see notes in
+ * create_modifytable_plan). Fortunately we can't be because there would
+ * never be aggregates in an UPDATE/DELETE; but let's Assert that.
+ */
+ Assert(!root->hasInheritedTarget);
+ Assert(root->minmax_aggs == NIL);
+ root->minmax_aggs = best_path->mmaggregates;
+
+ return plan;
+ }
+
+ /*
+ * create_windowagg_plan
+ *
+ * Create a WindowAgg plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static WindowAgg *
+ create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path)
+ {
+ WindowAgg *plan;
+ WindowClause *wc = best_path->winclause;
+ Plan *subplan;
+ List *tlist;
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+ Oid *collations;
+ bool *nullsFirst;
+ int partNumCols;
+ AttrNumber *partColIdx;
+ Oid *partOperators;
+ int ordNumCols;
+ AttrNumber *ordColIdx;
+ Oid *ordOperators;
+
+ /*
+ * WindowAgg can project, so no need to be terribly picky about child
+ * tlist, but we do need grouping columns to be available
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ /*
+ * We shouldn't need to actually sort, but it's convenient to use
+ * prepare_sort_from_pathkeys to identify the input's sort columns.
+ */
+ subplan = prepare_sort_from_pathkeys(subplan,
+ best_path->winpathkeys,
+ NULL,
+ NULL,
+ false,
+ &numsortkeys,
+ &sortColIdx,
+ &sortOperators,
+ &collations,
+ &nullsFirst);
+
+ /* Now deconstruct that into partition and ordering portions */
+ get_column_info_for_window(root,
+ wc,
+ subplan->targetlist,
+ numsortkeys,
+ sortColIdx,
+ &partNumCols,
+ &partColIdx,
+ &partOperators,
+ &ordNumCols,
+ &ordColIdx,
+ &ordOperators);
+
+ /* And finally we can make the WindowAgg node */
+ plan = make_windowagg(tlist,
+ wc->winref,
+ partNumCols,
+ partColIdx,
+ partOperators,
+ ordNumCols,
+ ordColIdx,
+ ordOperators,
+ wc->frameOptions,
+ wc->startOffset,
+ wc->endOffset,
+ subplan);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+ }
+
+ /*
+ * get_column_info_for_window
+ * Get the partitioning/ordering column numbers and equality operators
+ * for a WindowAgg node.
+ *
+ * This depends on the behavior of planner.c's make_pathkeys_for_window!
+ *
+ * We are given the target WindowClause and an array of the input column
+ * numbers associated with the resulting pathkeys. In the easy case, there
+ * are the same number of pathkey columns as partitioning + ordering columns
+ * and we just have to copy some data around. However, it's possible that
+ * some of the original partitioning + ordering columns were eliminated as
+ * redundant during the transformation to pathkeys. (This can happen even
+ * though the parser gets rid of obvious duplicates. A typical scenario is a
+ * window specification "PARTITION BY x ORDER BY y" coupled with a clause
+ * "WHERE x = y" that causes the two sort columns to be recognized as
+ * redundant.) In that unusual case, we have to work a lot harder to
+ * determine which keys are significant.
+ *
+ * The method used here is a bit brute-force: add the sort columns to a list
+ * one at a time and note when the resulting pathkey list gets longer. But
+ * it's a sufficiently uncommon case that a faster way doesn't seem worth
+ * the amount of code refactoring that'd be needed.
+ */
+ static void
+ get_column_info_for_window(PlannerInfo *root, WindowClause *wc, List *tlist,
+ int numSortCols, AttrNumber *sortColIdx,
+ int *partNumCols,
+ AttrNumber **partColIdx,
+ Oid **partOperators,
+ int *ordNumCols,
+ AttrNumber **ordColIdx,
+ Oid **ordOperators)
+ {
+ int numPart = list_length(wc->partitionClause);
+ int numOrder = list_length(wc->orderClause);
+
+ if (numSortCols == numPart + numOrder)
+ {
+ /* easy case */
+ *partNumCols = numPart;
+ *partColIdx = sortColIdx;
+ *partOperators = extract_grouping_ops(wc->partitionClause);
+ *ordNumCols = numOrder;
+ *ordColIdx = sortColIdx + numPart;
+ *ordOperators = extract_grouping_ops(wc->orderClause);
+ }
+ else
+ {
+ List *sortclauses;
+ List *pathkeys;
+ int scidx;
+ ListCell *lc;
+
+ /* first, allocate what's certainly enough space for the arrays */
+ *partNumCols = 0;
+ *partColIdx = (AttrNumber *) palloc(numPart * sizeof(AttrNumber));
+ *partOperators = (Oid *) palloc(numPart * sizeof(Oid));
+ *ordNumCols = 0;
+ *ordColIdx = (AttrNumber *) palloc(numOrder * sizeof(AttrNumber));
+ *ordOperators = (Oid *) palloc(numOrder * sizeof(Oid));
+ sortclauses = NIL;
+ pathkeys = NIL;
+ scidx = 0;
+ foreach(lc, wc->partitionClause)
+ {
+ SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
+ List *new_pathkeys;
+
+ sortclauses = lappend(sortclauses, sgc);
+ new_pathkeys = make_pathkeys_for_sortclauses(root,
+ sortclauses,
+ tlist);
+ if (list_length(new_pathkeys) > list_length(pathkeys))
+ {
+ /* this sort clause is actually significant */
+ (*partColIdx)[*partNumCols] = sortColIdx[scidx++];
+ (*partOperators)[*partNumCols] = sgc->eqop;
+ (*partNumCols)++;
+ pathkeys = new_pathkeys;
+ }
+ }
+ foreach(lc, wc->orderClause)
+ {
+ SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
+ List *new_pathkeys;
+
+ sortclauses = lappend(sortclauses, sgc);
+ new_pathkeys = make_pathkeys_for_sortclauses(root,
+ sortclauses,
+ tlist);
+ if (list_length(new_pathkeys) > list_length(pathkeys))
+ {
+ /* this sort clause is actually significant */
+ (*ordColIdx)[*ordNumCols] = sortColIdx[scidx++];
+ (*ordOperators)[*ordNumCols] = sgc->eqop;
+ (*ordNumCols)++;
+ pathkeys = new_pathkeys;
+ }
}
+ /* complain if we didn't eat exactly the right number of sort cols */
+ if (scidx != numSortCols)
+ elog(ERROR, "failed to deconstruct sort operators into partitioning/ordering operators");
}
+ }
- if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
- {
- /*
- * If the top plan node can't do projections and its existing target
- * list isn't already what we need, we need to add a Result node to
- * help it along.
- */
- if (!is_projection_capable_plan(subplan) &&
- !tlist_same_exprs(newtlist, subplan->targetlist))
- subplan = (Plan *) make_result(root, newtlist, NULL, subplan);
- else
- subplan->targetlist = newtlist;
- #ifdef XCP
- /*
- * RemoteSubplan is conditionally projection capable - it is pushing
- * projection to the data nodes
- */
- if (IsA(subplan, RemoteSubplan))
- subplan->lefttree->targetlist = newtlist;
- #endif
- }
+ /*
+ * create_setop_plan
+ *
+ * Create a SetOp plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static SetOp *
+ create_setop_plan(PlannerInfo *root, SetOpPath *best_path, int flags)
+ {
+ SetOp *plan;
+ Plan *subplan;
+ long numGroups;
/*
- * Build control information showing which subplan output columns are to
- * be examined by the grouping step. Unfortunately we can't merge this
- * with the previous loop, since we didn't then know which version of the
- * subplan tlist we'd end up using.
+ * SetOp doesn't project, so tlist requirements pass through; moreover we
+ * need grouping columns to be labeled.
*/
- newtlist = subplan->targetlist;
- numGroupCols = list_length(uniq_exprs);
- groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
+ subplan = create_plan_recurse(root, best_path->subpath,
+ flags | CP_LABEL_TLIST);
- groupColPos = 0;
- foreach(l, uniq_exprs)
- {
- Node *uniqexpr = lfirst(l);
- TargetEntry *tle;
+ /* Convert numGroups to long int --- but 'ware overflow! */
+ numGroups = (long) Min(best_path->numGroups, (double) LONG_MAX);
- tle = tlist_member(uniqexpr, newtlist);
- if (!tle) /* shouldn't happen */
- elog(ERROR, "failed to find unique expression in subplan tlist");
- groupColIdx[groupColPos++] = tle->resno;
- }
+ plan = make_setop(best_path->cmd,
+ best_path->strategy,
+ subplan,
+ best_path->distinctList,
+ best_path->flagColIdx,
+ best_path->firstFlag,
+ numGroups);
- if (best_path->umethod == UNIQUE_PATH_HASH)
- {
- long numGroups;
- Oid *groupOperators;
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
- numGroups = (long) Min(best_path->path.rows, (double) LONG_MAX);
+ return plan;
+ }
- /*
- * Get the hashable equality operators for the Agg node to use.
- * Normally these are the same as the IN clause operators, but if
- * those are cross-type operators then the equality operators are the
- * ones for the IN clause operators' RHS datatype.
- */
- groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
- groupColPos = 0;
- foreach(l, in_operators)
- {
- Oid in_oper = lfirst_oid(l);
- Oid eq_oper;
+ /*
+ * create_recursiveunion_plan
+ *
+ * Create a RecursiveUnion plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static RecursiveUnion *
+ create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path)
+ {
+ RecursiveUnion *plan;
+ Plan *leftplan;
+ Plan *rightplan;
+ List *tlist;
+ long numGroups;
- if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
- elog(ERROR, "could not find compatible hash operator for operator %u",
- in_oper);
- groupOperators[groupColPos++] = eq_oper;
- }
+ /* Need both children to produce same tlist, so force it */
+ leftplan = create_plan_recurse(root, best_path->leftpath, CP_EXACT_TLIST);
+ rightplan = create_plan_recurse(root, best_path->rightpath, CP_EXACT_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ /* Convert numGroups to long int --- but 'ware overflow! */
+ numGroups = (long) Min(best_path->numGroups, (double) LONG_MAX);
+
+ plan = make_recursive_union(tlist,
+ leftplan,
+ rightplan,
+ best_path->wtParam,
+ best_path->distinctList,
+ numGroups);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+ }
+
+ /*
+ * create_lockrows_plan
+ *
+ * Create a LockRows plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static LockRows *
+ create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
+ int flags)
+ {
+ LockRows *plan;
+ Plan *subplan;
+
+ /* LockRows doesn't project, so tlist requirements pass through */
+ subplan = create_plan_recurse(root, best_path->subpath, flags);
+
+ plan = make_lockrows(subplan, best_path->rowMarks, best_path->epqParam);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+ }
+
+ /*
+ * create_modifytable_plan
+ * Create a ModifyTable plan for 'best_path'.
+ *
+ * Returns a Plan node.
+ */
+ static ModifyTable *
+ create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path)
+ {
+ ModifyTable *plan;
+ List *subplans = NIL;
+ ListCell *subpaths,
+ *subroots;
+
+ /* Build the plan for each input path */
+ forboth(subpaths, best_path->subpaths,
+ subroots, best_path->subroots)
+ {
+ Path *subpath = (Path *) lfirst(subpaths);
+ PlannerInfo *subroot = (PlannerInfo *) lfirst(subroots);
+ Plan *subplan;
/*
- * Since the Agg node is going to project anyway, we can give it the
- * minimum output tlist, without any stuff we might have added to the
- * subplan tlist.
+ * In an inherited UPDATE/DELETE, reference the per-child modified
+ * subroot while creating Plans from Paths for the child rel. This is
+ * a kluge, but otherwise it's too hard to ensure that Plan creation
+ * functions (particularly in FDWs) don't depend on the contents of
+ * "root" matching what they saw at Path creation time. The main
+ * downside is that creation functions for Plans that might appear
+ * below a ModifyTable cannot expect to modify the contents of "root"
+ * and have it "stick" for subsequent processing such as setrefs.c.
+ * That's not great, but it seems better than the alternative.
*/
- plan = (Plan *) make_agg(root,
- build_path_tlist(root, &best_path->path),
- NIL,
- AGG_HASHED,
- NULL,
- numGroupCols,
- groupColIdx,
- groupOperators,
- NIL,
- numGroups,
- subplan);
+ subplan = create_plan_recurse(subroot, subpath, CP_EXACT_TLIST);
+
+ /* Transfer resname/resjunk labeling, too, to keep executor happy */
+ apply_tlist_labeling(subplan->targetlist, subroot->processed_tlist);
+
+ subplans = lappend(subplans, subplan);
}
- else
- {
- List *sortList = NIL;
- /* Create an ORDER BY list to sort the input compatibly */
- groupColPos = 0;
- foreach(l, in_operators)
- {
- Oid in_oper = lfirst_oid(l);
- Oid sortop;
- Oid eqop;
- TargetEntry *tle;
- SortGroupClause *sortcl;
+ plan = make_modifytable(root,
+ best_path->operation,
+ best_path->canSetTag,
+ best_path->nominalRelation,
+ best_path->resultRelations,
+ subplans,
+ best_path->withCheckOptionLists,
+ best_path->returningLists,
+ best_path->rowMarks,
+ best_path->onconflict,
+ best_path->epqParam);
- sortop = get_ordering_op_for_equality_op(in_oper, false);
- if (!OidIsValid(sortop)) /* shouldn't happen */
- elog(ERROR, "could not find ordering operator for equality operator %u",
- in_oper);
+ copy_generic_path_info(&plan->plan, &best_path->path);
- /*
- * The Unique node will need equality operators. Normally these
- * are the same as the IN clause operators, but if those are
- * cross-type operators then the equality operators are the ones
- * for the IN clause operators' RHS datatype.
- */
- eqop = get_equality_op_for_ordering_op(sortop, NULL);
- if (!OidIsValid(eqop)) /* shouldn't happen */
- elog(ERROR, "could not find equality operator for ordering operator %u",
- sortop);
+ return plan;
+ }
- tle = get_tle_by_resno(subplan->targetlist,
- groupColIdx[groupColPos]);
- Assert(tle != NULL);
+ /*
+ * create_limit_plan
+ *
+ * Create a Limit plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+ static Limit *
+ create_limit_plan(PlannerInfo *root, LimitPath *best_path, int flags)
+ {
+ Limit *plan;
+ Plan *subplan;
- sortcl = makeNode(SortGroupClause);
- sortcl->tleSortGroupRef = assignSortGroupRef(tle,
- subplan->targetlist);
- sortcl->eqop = eqop;
- sortcl->sortop = sortop;
- sortcl->nulls_first = false;
- sortcl->hashable = false; /* no need to make this accurate */
- sortList = lappend(sortList, sortcl);
- groupColPos++;
- }
- plan = (Plan *) make_sort_from_sortclauses(root, sortList, subplan);
- plan = (Plan *) make_unique(plan, sortList);
- }
+ /* Limit doesn't project, so tlist requirements pass through */
+ subplan = create_plan_recurse(root, best_path->subpath, flags);
+
+ plan = make_limit(subplan,
+ best_path->limitOffset,
+ best_path->limitCount);
- /* Adjust output size estimate (other fields should be OK already) */
- plan->plan_rows = best_path->path.rows;
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
return plan;
}
Index scanrelid,
List *fdw_exprs,
List *fdw_private,
- List *fdw_scan_tlist)
+ List *fdw_scan_tlist,
+ List *fdw_recheck_quals,
+ Plan *outer_plan)
{
ForeignScan *node = makeNode(ForeignScan);
+
Plan *plan = &node->scan.plan;
/* cost will be filled in by create_foreignscan_plan */
return node;
}
- RecursiveUnion *
+ static RecursiveUnion *
-make_recursive_union(List *tlist,
+make_recursive_union(
+#ifdef XCP
+ PlannerInfo *root,
+#endif
+ List *tlist,
Plan *lefttree,
Plan *righttree,
int wtParam,
RecursiveUnion *node = makeNode(RecursiveUnion);
Plan *plan = &node->plan;
int numCols = list_length(distinctList);
+#ifdef XCP
+ RemoteSubplan *left_pushdown, *right_pushdown;
+#endif
- cost_recursive_union(plan, lefttree, righttree);
-
plan->targetlist = tlist;
plan->qual = NIL;
plan->lefttree = lefttree;
{
Sort *node = makeNode(Sort);
Plan *plan = &node->plan;
- Path sort_path; /* dummy for result of cost_sort */
+#ifdef XCP
+ RemoteSubplan *pushdown;
+#endif
- copy_plan_costsize(plan, lefttree); /* only care about copying size */
- cost_sort(&sort_path, root, NIL,
- lefttree->total_cost,
- lefttree->plan_rows,
- lefttree->plan_width,
- 0.0,
- work_mem,
- limit_tuples);
- plan->startup_cost = sort_path.startup_cost;
- plan->total_cost = sort_path.total_cost;
plan->targetlist = lefttree->targetlist;
plan->qual = NIL;
plan->lefttree = lefttree;
return matplan;
}
+
+#ifdef XCP
+typedef struct
+{
+ List *subtlist;
+ List *newtlist;
+} find_referenced_cols_context;
+
+static bool
+find_referenced_cols_walker(Node *node, find_referenced_cols_context *context)
+{
+ TargetEntry *tle;
+
+ if (node == NULL)
+ return false;
+ if (IsA(node, Aggref))
+ {
+ /*
+ * We cannot push down aggregates with DISTINCT.
+ */
+ if (((Aggref *) node)->aggdistinct)
+ return true;
+
+ /*
+ * We cannot push down aggregates with ORDER BY.
+ */
+ if (((Aggref *) node)->aggorder)
+ return true;
+
+ /*
+ * We need to add a reference to the aggregate to the new tlist if it
+ * is not already there. The phase-1 aggregate actually returns values
+ * of the transition data type, so we should change the data type of
+ * the expression.
+ */
+ if (!tlist_member(node, context->newtlist))
+ {
+ Aggref *aggref = (Aggref *) node;
+ Aggref *newagg;
+ TargetEntry *newtle;
+ HeapTuple aggTuple;
+ Form_pg_aggregate aggform;
+ Oid aggtranstype;
+ Oid aggcollecttype;
+
+ aggTuple = SearchSysCache1(AGGFNOID,
+ ObjectIdGetDatum(aggref->aggfnoid));
+ if (!HeapTupleIsValid(aggTuple))
+ elog(ERROR, "cache lookup failed for aggregate %u",
+ aggref->aggfnoid);
+ aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
+ aggtranstype = aggform->aggtranstype;
+ aggcollecttype = aggform->aggcollecttype;
+ ReleaseSysCache(aggTuple);
+
+ /* Cannot split this aggregate into two phases */
+ if (!OidIsValid(aggcollecttype))
+ return true;
+
+ if (IsPolymorphicType(aggtranstype))
+ {
+ Oid *inputTypes;
+ Oid *declaredArgTypes;
+ int agg_nargs;
+ int numArgs;
+ ListCell *l;
+
+ inputTypes = (Oid *) palloc(sizeof(Oid) * list_length(aggref->args));
+ numArgs = 0;
+ foreach(l, aggref->args)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (!tle->resjunk)
+ inputTypes[numArgs++] = exprType((Node *) tle->expr);
+ }
+
+ /* have to fetch the agg's declared input types... */
+ (void) get_func_signature(aggref->aggfnoid,
+ &declaredArgTypes, &agg_nargs);
+ Assert(agg_nargs == numArgs);
+
+ aggtranstype = enforce_generic_type_consistency(inputTypes,
+ declaredArgTypes,
+ agg_nargs,
+ aggtranstype,
+ false);
+ pfree(inputTypes);
+ pfree(declaredArgTypes);
+ }
+ newagg = copyObject(aggref);
+ newagg->aggtype = aggtranstype;
+
+ newtle = makeTargetEntry((Expr *) newagg,
+ list_length(context->newtlist) + 1,
+ NULL,
+ false);
+ context->newtlist = lappend(context->newtlist, newtle);
+ }
+
+ return false;
+ }
+ /*
+ * If the expression is in the subtlist, copy it into the new tlist.
+ */
+ tle = tlist_member(node, context->subtlist);
+ if (tle && !tlist_member((Node *) tle->expr, context->newtlist))
+ {
+ TargetEntry *newtle;
+ newtle = makeTargetEntry((Expr *) copyObject(node),
+ list_length(context->newtlist) + 1,
+ tle->resname,
+ false);
+ context->newtlist = lappend(context->newtlist, newtle);
+ return false;
+ }
+ if (IsA(node, Var))
+ {
+ /*
+ * The referenced Var is not a member of the subtlist.
+ * Go ahead and add a junk entry for it.
+ */
+ TargetEntry *newtle;
+ newtle = makeTargetEntry((Expr *) copyObject(node),
+ list_length(context->newtlist) + 1,
+ NULL,
+ true);
+ context->newtlist = lappend(context->newtlist, newtle);
+ return false;
+ }
+ return expression_tree_walker(node, find_referenced_cols_walker,
+ (void *) context);
+}
+#endif
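+
+/*
+ * The walker above exists to support Postgres-XL two-phase aggregation:
+ * the data nodes run the transition phase and ship transition values,
+ * which the coordinator combines using the aggregate's collection
+ * function. That is why the copied Aggref is given aggtranstype as its
+ * result type, and why aggregates without a collection function (invalid
+ * aggcollecttype), or with DISTINCT or ORDER BY, make the walker return
+ * true and abort the pushdown.
+ */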
+
Agg *
- make_agg(PlannerInfo *root, List *tlist, List *qual,
- AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
+ make_agg(List *tlist, List *qual,
+ AggStrategy aggstrategy, AggSplit aggsplit,
int numGroupCols, AttrNumber *grpColIdx, Oid *grpOperators,
- List *groupingSets,
- long numGroups,
- Plan *lefttree)
+ List *groupingSets, List *chain,
+ double dNumGroups, Plan *lefttree)
{
Agg *node = makeNode(Agg);
Plan *plan = &node->plan;
- Path agg_path; /* dummy for result of cost_agg */
- QualCost qual_cost;
+#ifdef XCP
+ RemoteSubplan *pushdown;
+#endif
+ long numGroups;
+
+ /* Reduce to long, but 'ware overflow! */
+ numGroups = (long) Min(dNumGroups, (double) LONG_MAX);
node->aggstrategy = aggstrategy;
+ node->aggsplit = aggsplit;
node->numCols = numGroupCols;
node->grpColIdx = grpColIdx;
node->grpOperators = grpOperators;
AttrNumber *uniqColIdx;
Oid *uniqOperators;
ListCell *slitem;
+#ifdef XCP
+ RemoteSubplan *pushdown;
+#endif
- copy_plan_costsize(plan, lefttree);
-
- /*
- * Charge one cpu_operator_cost per comparison per input tuple. We assume
- * all columns get compared at most of the tuples. (XXX probably this is
- * an overestimate.)
- */
- plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
-
- /*
- * plan->plan_rows is left as a copy of the input subplan's plan_rows; ie,
- * we assume the filter removes nothing. The caller must alter this if he
- * has a better idea.
- */
-
plan->targetlist = lefttree->targetlist;
plan->qual = NIL;
plan->lefttree = lefttree;
{
Limit *node = makeNode(Limit);
Plan *plan = &node->plan;
+#ifdef XCP
+ RemoteSubplan *pushdown;
+#endif
- copy_plan_costsize(plan, lefttree);
-
- /*
- * Adjust the output rows count and costs according to the offset/limit.
- * This is only a cosmetic issue if we are at top level, but if we are
- * building a subquery then it's important to report correct info to the
- * outer planner.
- *
- * When the offset or count couldn't be estimated, use 10% of the
- * estimated number of rows emitted from the subplan.
- */
- if (offset_est != 0)
- {
- double offset_rows;
-
- if (offset_est > 0)
- offset_rows = (double) offset_est;
- else
- offset_rows = clamp_row_est(lefttree->plan_rows * 0.10);
- if (offset_rows > plan->plan_rows)
- offset_rows = plan->plan_rows;
- if (plan->plan_rows > 0)
- plan->startup_cost +=
- (plan->total_cost - plan->startup_cost)
- * offset_rows / plan->plan_rows;
- plan->plan_rows -= offset_rows;
- if (plan->plan_rows < 1)
- plan->plan_rows = 1;
- }
-
- if (count_est != 0)
- {
- double count_rows;
-
- if (count_est > 0)
- count_rows = (double) count_est;
- else
- count_rows = clamp_row_est(lefttree->plan_rows * 0.10);
- if (count_rows > plan->plan_rows)
- count_rows = plan->plan_rows;
- if (plan->plan_rows > 0)
- plan->total_cost = plan->startup_cost +
- (plan->total_cost - plan->startup_cost)
- * count_rows / plan->plan_rows;
- plan->plan_rows = count_rows;
- if (plan->plan_rows < 1)
- plan->plan_rows = 1;
- }
-
plan->targetlist = lefttree->targetlist;
plan->qual = NIL;
plan->lefttree = lefttree;
* scan all the rows anyway.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* planner.c
* The query optimizer external interface.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include "parser/parsetree.h"
#include "parser/parse_agg.h"
#include "rewrite/rewriteManip.h"
+ #include "storage/dsm_impl.h"
#include "utils/rel.h"
+#ifdef PGXC
+#include "commands/prepare.h"
+#include "pgxc/pgxc.h"
+#include "pgxc/planner.h"
+#endif
#include "utils/selfuncs.h"
+ #include "utils/lsyscache.h"
+ #include "utils/syscache.h"
- /* GUC parameter */
+ /* GUC parameters */
double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
+ int force_parallel_mode = FORCE_PARALLEL_OFF;
/* Hook for plugins to get control in planner() */
planner_hook_type planner_hook = NULL;
static List *extract_rollup_sets(List *groupingSets);
static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
static void standard_qp_callback(PlannerInfo *root, void *extra);
- static bool choose_hashed_grouping(PlannerInfo *root,
- double tuple_fraction, double limit_tuples,
- double path_rows, int path_width,
- Path *cheapest_path, Path *sorted_path,
- double dNumGroups, AggClauseCosts *agg_costs);
- static bool choose_hashed_distinct(PlannerInfo *root,
- double tuple_fraction, double limit_tuples,
- double path_rows, int path_width,
- Cost cheapest_startup_cost, Cost cheapest_total_cost,
- Cost sorted_startup_cost, Cost sorted_total_cost,
- List *sorted_pathkeys,
- double dNumDistinctRows);
- static List *make_subplanTargetList(PlannerInfo *root, List *tlist,
- AttrNumber **groupColIdx, bool *need_tlist_eval);
- static int get_grouping_column_index(Query *parse, TargetEntry *tle);
- static void locate_grouping_columns(PlannerInfo *root,
- List *tlist,
- List *sub_tlist,
- AttrNumber *groupColIdx);
+ static double get_number_of_groups(PlannerInfo *root,
+ double path_rows,
+ List *rollup_lists,
+ List *rollup_groupclauses);
+ static Size estimate_hashagg_tablesize(Path *path,
+ const AggClauseCosts *agg_costs,
+ double dNumGroups);
+ static RelOptInfo *create_grouping_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ PathTarget *target,
+ const AggClauseCosts *agg_costs,
+ List *rollup_lists,
+ List *rollup_groupclauses);
+ static RelOptInfo *create_window_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ PathTarget *input_target,
+ PathTarget *output_target,
+ List *tlist,
+ WindowFuncLists *wflists,
+ List *activeWindows);
+ static void create_one_window_path(PlannerInfo *root,
+ RelOptInfo *window_rel,
+ Path *path,
+ PathTarget *input_target,
+ PathTarget *output_target,
+ List *tlist,
+ WindowFuncLists *wflists,
+ List *activeWindows);
+ static RelOptInfo *create_distinct_paths(PlannerInfo *root,
+ RelOptInfo *input_rel);
+ static RelOptInfo *create_ordered_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ PathTarget *target,
+ double limit_tuples);
+ static PathTarget *make_group_input_target(PlannerInfo *root,
+ PathTarget *final_target);
+ static PathTarget *make_partial_grouping_target(PlannerInfo *root,
+ PathTarget *grouping_target);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
- static List *make_windowInputTargetList(PlannerInfo *root,
- List *tlist, List *activeWindows);
+ static PathTarget *make_window_input_target(PlannerInfo *root,
+ PathTarget *final_target,
+ List *activeWindows);
static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
List *tlist);
- static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc,
- List *tlist,
- int numSortCols, AttrNumber *sortColIdx,
- int *partNumCols,
- AttrNumber **partColIdx,
- Oid **partOperators,
- int *ordNumCols,
- AttrNumber **ordColIdx,
- Oid **ordOperators);
+#ifdef XCP
+static Plan *grouping_distribution(PlannerInfo *root, Plan *plan,
+ int numGroupCols, AttrNumber *groupColIdx,
+ List *current_pathkeys, Distribution **distribution);
+static bool equal_distributions(PlannerInfo *root, Distribution *dst1,
+ Distribution *dst2);
+#endif
- static Plan *build_grouping_chain(PlannerInfo *root,
- Query *parse,
- List *tlist,
- bool need_sort_for_grouping,
- List *rollup_groupclauses,
- List *rollup_lists,
- AttrNumber *groupColIdx,
- AggClauseCosts *agg_costs,
- long numGroups,
- Plan *result_plan);
+ static PathTarget *make_sort_input_target(PlannerInfo *root,
+ PathTarget *final_target,
+ bool *have_postponed_srfs);
+
/*****************************************************************************
*
}
/* primary planning entry point (may recurse for subqueries) */
- top_plan = subquery_planner(glob, parse, NULL,
- false, tuple_fraction, &root);
+ root = subquery_planner(glob, parse, NULL,
+ false, tuple_fraction);
+
+ /* Select best Path and turn it into a Plan */
+ final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
+ best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
+
+ top_plan = create_plan(root, best_path);
+#ifdef XCP
+ if (root->distribution)
+ {
+ top_plan = (Plan *) make_remotesubplan(root, top_plan, NULL,
+ root->distribution,
+ root->query_pathkeys);
+ }
+#endif
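+
+ /*
+ * At this point a distributed top plan still produces its rows on the
+ * data nodes; the RemoteSubplan added above funnels them back to the
+ * originating node, merging according to root->query_pathkeys so an
+ * ordered result stays ordered.
+ */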
/*
* If creating a plan for a scrollable cursor, make sure it can run
result->rowMarks = glob->finalrowmarks;
result->relationOids = glob->relationOids;
result->invalItems = glob->invalItems;
+#ifdef XCP
+ result->distributionType = LOCATOR_TYPE_NONE;
+ result->distributionKey = InvalidAttrNumber;
+ result->distributionNodes = NULL;
+#endif
result->nParamExec = glob->nParamExec;
- result->hasRowSecurity = glob->hasRowSecurity;
return result;
}
List *newWithCheckOptions;
List *newHaving;
bool hasOuterJoins;
+ RelOptInfo *final_rel;
ListCell *l;
+ bool recursiveOk = true;
/* Create a PlannerInfo data structure for this subquery */
root = makeNode(PlannerInfo);
root->eq_classes = NIL;
root->append_rel_list = NIL;
root->rowMarks = NIL;
- root->hasInheritedTarget = false;
+ memset(root->upper_rels, 0, sizeof(root->upper_rels));
+ memset(root->upper_targets, 0, sizeof(root->upper_targets));
+ root->processed_tlist = NIL;
root->grouping_map = NULL;
+ root->recursiveOk = true;
+
+ root->minmax_aggs = NIL;
+ root->hasInheritedTarget = false;
root->hasRecursion = hasRecursion;
if (hasRecursion)
root->wt_param_id = SS_assign_special_param(root);
*/
if (parse->resultRelation &&
rt_fetch(parse->resultRelation, parse->rtable)->inh)
- plan = inheritance_planner(root);
+ inheritance_planner(root);
else
- {
- plan = grouping_planner(root, tuple_fraction);
- /* If it's not SELECT, we need a ModifyTable node */
- if (parse->commandType != CMD_SELECT)
- {
- List *withCheckOptionLists;
- List *returningLists;
- List *rowMarks;
-
- /*
- * Set up the WITH CHECK OPTION and RETURNING lists-of-lists, if
- * needed.
- */
- if (parse->withCheckOptions)
- withCheckOptionLists = list_make1(parse->withCheckOptions);
- else
- withCheckOptionLists = NIL;
-
- if (parse->returningList)
- returningLists = list_make1(parse->returningList);
- else
- returningLists = NIL;
-
- /*
- * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
- * will have dealt with fetching non-locked marked rows, else we
- * need to have ModifyTable do that.
- */
- if (parse->rowMarks)
- rowMarks = NIL;
- else
- rowMarks = root->rowMarks;
+ grouping_planner(root, false, tuple_fraction);
-
+ /*
+ * Capture the set of outer-level param IDs we have access to, for use in
+ * extParam/allParam calculations later.
+ */
+ SS_identify_outer_params(root);
- if (root->query_level > 1)
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("INSERT/UPDATE/DELETE is not supported in subquery")));
+#ifdef XCP
- plan = (Plan *) make_modifytable(root,
- parse->commandType,
- parse->canSetTag,
- parse->resultRelation,
- list_make1_int(parse->resultRelation),
- list_make1(plan),
- withCheckOptionLists,
- returningLists,
- rowMarks,
- parse->onConflict,
- SS_assign_special_param(root));
- }
- }
++ if (root->query_level > 1)
++ ereport(ERROR,
++ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
++ errmsg("INSERT/UPDATE/DELETE is not supported in subquery")));
+#endif
+
++ plan = (Plan *) make_modifytable(root,
++ parse->commandType,
++ parse->canSetTag,
++ parse->resultRelation,
++ list_make1_int(parse->resultRelation),
++ list_make1(plan),
++ withCheckOptionLists,
++ returningLists,
++ rowMarks,
++ parse->onConflict,
++ SS_assign_special_param(root));
+
/*
- * If any subplans were generated, or if there are any parameters to worry
- * about, build initPlan list and extParam/allParam sets for plan nodes,
- * and attach the initPlans to the top plan node.
+ * If any initPlans were created in this query level, increment the
+ * surviving Paths' costs to account for them. They won't actually get
+ * attached to the plan tree till create_plan() runs, but we want to be
+ * sure their costs are included now.
*/
- if (list_length(glob->subplans) != num_old_subplans ||
- root->glob->nParamExec > 0)
- SS_finalize_plan(root, plan, true);
+ final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
+ SS_charge_for_initplans(root, final_rel);
- /* Return internal info if caller wants it */
- if (subroot)
- *subroot = root;
+ /*
+ * Make sure we've identified the cheapest Path for the final rel. (By
+ * doing this here not in grouping_planner, we include initPlan costs in
+ * the decision, though it's unlikely that will change anything.)
+ */
+ set_cheapest(final_rel);
- return plan;
+ /*
+ * XCPTODO
+ * Temporarily block WITH RECURSIVE for most cases until we can fix it.
+ * Allow it for pg_catalog tables and replicated tables.
+ */
+ {
+ int idx;
+ recursiveOk = true;
+
+ /* simple_rel_array entries start at index 1 */
+		for (idx = 1; idx < root->simple_rel_array_size && recursiveOk; idx++)
+ {
+ RangeTblEntry *rte;
+
+ rte = planner_rt_fetch(idx, root);
+ if (!rte)
+ continue;
+
+ switch (rte->rtekind)
+ {
+ case RTE_JOIN:
+ case RTE_VALUES:
+ case RTE_CTE:
+ continue;
+ case RTE_RELATION:
+ {
+ char loc_type;
+
+ loc_type = GetRelationLocType(rte->relid);
+
+ /* skip pg_catalog */
+ if (loc_type == LOCATOR_TYPE_NONE)
+ continue;
+
+ /* If replicated, allow */
+ if (IsLocatorReplicated(loc_type))
+ continue;
+ else
+ recursiveOk = false;
+ break;
+ }
+ case RTE_SUBQUERY:
+ {
+ RelOptInfo *relOptInfo = root->simple_rel_array[idx];
+ if (relOptInfo && relOptInfo->subroot &&
+ !relOptInfo->subroot->recursiveOk)
+ recursiveOk = false;
+ break;
+ }
+ default:
+ recursiveOk = false;
+ break;
+ }
+ }
+ }
+
+ if (root->recursiveOk)
+ root->recursiveOk = recursiveOk;
+
+ if (root->hasRecursion && !root->recursiveOk)
+ elog(ERROR, "WITH RECURSIVE currently not supported on distributed tables.");
+
+ return root;
}
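/*
 * Illustrative sketch, not part of the patch: the RTE walk above boils down
 * to a per-RTE predicate.  The type and function names below are
 * hypothetical simplifications -- e.g. the real code defers RTE_SUBQUERY to
 * the subquery's own recursiveOk flag, which is omitted here.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { R_RELATION, R_SUBQUERY, R_JOIN, R_VALUES, R_CTE } RteKind;

static bool
rte_recursive_ok(RteKind kind, bool is_catalog, bool is_replicated)
{
	switch (kind)
	{
		case R_JOIN:
		case R_VALUES:
		case R_CTE:
			return true;					/* always safe */
		case R_RELATION:
			/* allow only pg_catalog and replicated tables */
			return is_catalog || is_replicated;
		default:
			return false;					/* distributed or unknown: block */
	}
}

int
main(void)
{
	/* a CTE, a replicated table, and a hash-distributed table */
	printf("%d %d %d\n",
		   rte_recursive_ok(R_CTE, false, false),		/* 1 */
		   rte_recursive_ok(R_RELATION, false, true),	/* 1 */
		   rte_recursive_ok(R_RELATION, false, false));	/* 0 */
	return 0;
}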
/*
* If this child rel was excluded by constraint exclusion, exclude it
* from the result plan.
*/
- if (is_dummy_plan(subplan))
+ if (IS_DUMMY_PATH(subpath))
continue;
- subplans = lappend(subplans, subplan);
+#ifdef XCP
+ /*
+	 * All subplans should have the same distribution, except perhaps for
+	 * the node restriction. At the moment this is always the case, but if
+	 * that changes we should handle inheritance differently.
+	 * Effectively we want to push the ModifyTable down to the data nodes
+	 * if it runs against distributed inherited tables. To achieve this we
+	 * build up the distribution of the query from the distributions of
+	 * the subplans.
+	 * If subplans are restricted to different nodes we union those
+	 * restrictions; if at least one subplan is unrestricted we do not
+	 * restrict the parent plan either.
+	 * After a plan is returned from this function, a valid
+	 * root->distribution value will force a proper RemoteSubplan node on
+	 * top of it.
+ */
+ if (root->distribution == NULL)
+ root->distribution = subroot.distribution;
+ else if (!bms_is_empty(root->distribution->restrictNodes))
+ {
+ if (bms_is_empty(subroot.distribution->restrictNodes))
+ {
+ bms_free(root->distribution->restrictNodes);
+ root->distribution->restrictNodes = NULL;
+ }
+ else
+ {
+ root->distribution->restrictNodes = bms_join(
+ root->distribution->restrictNodes,
+ subroot.distribution->restrictNodes);
+ subroot.distribution->restrictNodes = NULL;
+ }
+ }
+#endif
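/*
 * Illustrative sketch, not part of the patch: the restriction-merge rule
 * above, modelled with uint32 bitmasks standing in for Bitmapsets.  An
 * empty mask plays the role of "no restriction" (may run on any node);
 * one unrestricted child therefore makes the parent unrestricted, and
 * otherwise the children's node sets are unioned.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
merge_restriction(uint32_t parent, uint32_t child)
{
	if (parent == 0 || child == 0)
		return 0;				/* either side unrestricted => unrestricted */
	return parent | child;		/* otherwise union the node sets */
}

int
main(void)
{
	uint32_t	r = 0x1;				/* first child: node 0 only */

	r = merge_restriction(r, 0x4);		/* second child: node 2 => {0,2} */
	printf("mask 0x%x\n", r);			/* 0x5 */
	r = merge_restriction(r, 0);		/* unrestricted child wins */
	printf("mask 0x%x\n", r);			/* 0x0 */
	return 0;
}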
++ subroots = lappend(subroots, subroot);
+
/*
* If this is the first non-excluded child, its post-planning rtable
* becomes the initial contents of final_rtable; otherwise, append
int64 offset_est = 0;
int64 count_est = 0;
double limit_tuples = -1.0;
- Plan *result_plan;
- List *current_pathkeys;
- double dNumGroups = 0;
- bool use_hashed_distinct = false;
- bool tested_hashed_distinct = false;
+#ifdef XCP
+ Distribution *distribution = NULL; /* distribution of the result_plan */
+#endif
+ bool have_postponed_srfs = false;
+ double tlist_rows;
+ PathTarget *final_target;
+ RelOptInfo *current_rel;
+ RelOptInfo *final_rel;
+ ListCell *lc;
/* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
if (parse->limitCount || parse->limitOffset)
}
/*
- * Select the best path. If we are doing hashed grouping, we will
- * always read all the input tuples, so use the cheapest-total path.
- * Otherwise, the comparison above is correct.
+ * Save the various upper-rel PathTargets we just computed into
+ * root->upper_targets[]. The core code doesn't use this, but it
+ * provides a convenient place for extensions to get at the info. For
+ * consistency, we save all the intermediate targets, even though some
+ * of the corresponding upperrels might not be needed for this query.
*/
- if (use_hashed_grouping || use_hashed_distinct || !sorted_path)
- best_path = cheapest_path;
- else
- best_path = sorted_path;
+ root->upper_targets[UPPERREL_FINAL] = final_target;
+ root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
+ root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
/*
- * Check to see if it's possible to optimize MIN/MAX aggregates. If
- * so, we will forget all the work we did so far to choose a "regular"
- * path ... but we had to do it anyway to be able to tell which way is
- * cheaper.
+ * If we have grouping and/or aggregation, consider ways to implement
+ * that. We build a new upperrel representing the output of this
+ * phase.
*/
- result_plan = optimize_minmax_aggregates(root,
- tlist,
- &agg_costs,
- best_path);
- if (result_plan != NULL)
+ if (have_grouping)
{
- /*
- * optimize_minmax_aggregates generated the full plan, with the
- * right tlist, and it has no sort order.
- */
- current_pathkeys = NIL;
+ current_rel = create_grouping_paths(root,
+ current_rel,
+ grouping_target,
+ &agg_costs,
+ rollup_lists,
+ rollup_groupclauses);
}
- else
+
+ /*
+ * If we have window functions, consider ways to implement those. We
+ * build a new upperrel representing the output of this phase.
+ */
+ if (activeWindows)
{
- /*
- * Normal case --- create a plan according to query_planner's
- * results.
- */
- bool need_sort_for_grouping = false;
+ current_rel = create_window_paths(root,
+ current_rel,
+ grouping_target,
+ sort_input_target,
+ tlist,
+ wflists,
+ activeWindows);
+ }
- result_plan = create_plan(root, best_path);
- current_pathkeys = best_path->pathkeys;
+#ifdef XCP
- distribution = best_path->distribution;
++ distribution = best_path->distribution;
+#endif
+ /*
+ * If there is a DISTINCT clause, consider ways to implement that. We
+ * build a new upperrel representing the output of this phase.
+ */
+ if (parse->distinctClause)
+ {
+ current_rel = create_distinct_paths(root,
+ current_rel);
+ }
- /* Detect if we'll need an explicit sort for grouping */
- if (parse->groupClause && !use_hashed_grouping &&
- !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
- {
- need_sort_for_grouping = true;
+ } /* end of if (setOperations) */
++<<<<<<< HEAD
+ /*
+ * Always override create_plan's tlist, so that we don't sort
+ * useless data from a "physical" tlist.
+ */
+ need_tlist_eval = true;
+ }
+
+ /*
+ * create_plan returns a plan with just a "flat" tlist of required
+ * Vars. Usually we need to insert the sub_tlist as the tlist of
+ * the top plan node. However, we can skip that if we determined
+ * that whatever create_plan chose to return will be good enough.
+ */
+ if (need_tlist_eval)
+ {
+ /*
+ * If the top-level plan node is one that cannot do expression
+ * evaluation and its existing target list isn't already what
+ * we need, we must insert a Result node to project the
+ * desired tlist.
+ */
+ if (!is_projection_capable_plan(result_plan) &&
+ !tlist_same_exprs(sub_tlist, result_plan->targetlist))
+ {
+ result_plan = (Plan *) make_result(root,
+ sub_tlist,
+ NULL,
+ result_plan);
+ }
+ else
+ {
+ /*
+ * Otherwise, just replace the subplan's flat tlist with
+ * the desired tlist.
+ */
+ result_plan->targetlist = sub_tlist;
+ }
+#ifdef XCP
+ /*
+			 * RemoteSubplan is conditionally projection capable - it pushes
+			 * the projection down to the data nodes
+ */
+ if (IsA(result_plan, RemoteSubplan))
+ result_plan->lefttree->targetlist = sub_tlist;
+#endif
+
+ /*
+ * Also, account for the cost of evaluation of the sub_tlist.
+ * See comments for add_tlist_costs_to_plan() for more info.
+ */
+ add_tlist_costs_to_plan(root, result_plan, sub_tlist);
+ }
+ else
+ {
+ /*
+ * Since we're using create_plan's tlist and not the one
+ * make_subplanTargetList calculated, we have to refigure any
+ * grouping-column indexes make_subplanTargetList computed.
+ */
+ locate_grouping_columns(root, tlist, result_plan->targetlist,
+ groupColIdx);
+ }
+
+ /*
+ * groupColIdx is now cast in stone, so record a mapping from
+ * tleSortGroupRef to column index. setrefs.c needs this to
+ * finalize GROUPING() operations.
+ */
+
+ if (parse->groupingSets)
+ {
+ AttrNumber *grouping_map = palloc0(sizeof(AttrNumber) * (maxref + 1));
+ ListCell *lc;
+ int i = 0;
+
+ foreach(lc, parse->groupClause)
+ {
+ SortGroupClause *gc = lfirst(lc);
+
+ grouping_map[gc->tleSortGroupRef] = groupColIdx[i++];
+ }
+
+ root->grouping_map = grouping_map;
+ }
+
+ /*
+ * Insert AGG or GROUP node if needed, plus an explicit sort step
+ * if necessary.
+ *
+ * HAVING clause, if any, becomes qual of the Agg or Group node.
+ */
+ if (use_hashed_grouping)
+ {
+#ifdef XCP
+ result_plan = grouping_distribution(root, result_plan,
+ numGroupCols, groupColIdx,
+ current_pathkeys,
+ &distribution);
+#endif
+ /* Hashed aggregate plan --- no sort needed */
+ result_plan = (Plan *) make_agg(root,
+ tlist,
+ (List *) parse->havingQual,
+ AGG_HASHED,
+ &agg_costs,
+ numGroupCols,
+ groupColIdx,
+ extract_grouping_ops(parse->groupClause),
+ NIL,
+ numGroups,
+ result_plan);
+ /* Hashed aggregation produces randomly-ordered results */
+ current_pathkeys = NIL;
+ }
+ else if (parse->hasAggs || (parse->groupingSets && parse->groupClause))
+ {
+ /*
+ * Output is in sorted order by group_pathkeys if, and only
+ * if, there is a single rollup operation on a non-empty list
+ * of grouping expressions.
+ */
+ if (list_length(rollup_groupclauses) == 1
+ && list_length(linitial(rollup_groupclauses)) > 0)
+ current_pathkeys = root->group_pathkeys;
+ else
+ current_pathkeys = NIL;
+
+#ifdef XCP
+ result_plan = grouping_distribution(root, result_plan,
+ numGroupCols, groupColIdx,
+ current_pathkeys,
+ &distribution);
+#endif
+ result_plan = build_grouping_chain(root,
+ parse,
+ tlist,
+ need_sort_for_grouping,
+ rollup_groupclauses,
+ rollup_lists,
+ groupColIdx,
+ &agg_costs,
+ numGroups,
+ result_plan);
+
+ /*
+ * these are destroyed by build_grouping_chain, so make sure
+ * we don't try and touch them again
+ */
+ rollup_groupclauses = NIL;
+ rollup_lists = NIL;
+ }
+ else if (parse->groupClause)
+ {
+ /*
+ * GROUP BY without aggregation, so insert a group node (plus
+ * the appropriate sort node, if necessary).
+ *
+ * Add an explicit sort if we couldn't make the path come out
+ * the way the GROUP node needs it.
+ */
+ if (need_sort_for_grouping)
+ {
+ result_plan = (Plan *)
+ make_sort_from_groupcols(root,
+ parse->groupClause,
+ groupColIdx,
+ result_plan);
+ current_pathkeys = root->group_pathkeys;
+ }
+
+#ifdef XCP
+ result_plan = grouping_distribution(root, result_plan,
+ numGroupCols, groupColIdx,
+ current_pathkeys,
+ &distribution);
+#endif
+ result_plan = (Plan *) make_group(root,
+ tlist,
+ (List *) parse->havingQual,
+ numGroupCols,
+ groupColIdx,
+ extract_grouping_ops(parse->groupClause),
+ dNumGroups,
+ result_plan);
+ }
+ else if (root->hasHavingQual || parse->groupingSets)
+ {
+ int nrows = list_length(parse->groupingSets);
+
+ /*
+ * No aggregates, and no GROUP BY, but we have a HAVING qual
+ * or grouping sets (which by elimination of cases above must
+ * consist solely of empty grouping sets, since otherwise
+ * groupClause will be non-empty).
+ *
+ * This is a degenerate case in which we are supposed to emit
+ * either 0 or 1 row for each grouping set depending on
+ * whether HAVING succeeds. Furthermore, there cannot be any
+ * variables in either HAVING or the targetlist, so we
+ * actually do not need the FROM table at all! We can just
+ * throw away the plan-so-far and generate a Result node. This
+ * is a sufficiently unusual corner case that it's not worth
+ * contorting the structure of this routine to avoid having to
+ * generate the plan in the first place.
+ */
+#ifdef XCP
+ result_plan = grouping_distribution(root, result_plan, 0, NULL,
+ current_pathkeys,
+ &distribution);
+#endif
+ result_plan = (Plan *) make_result(root,
+ tlist,
+ parse->havingQual,
+ NULL);
+
+ /*
+ * Doesn't seem worthwhile writing code to cons up a
+ * generate_series or a values scan to emit multiple rows.
+ * Instead just clone the result in an Append.
+ */
+ if (nrows > 1)
+ {
+ List *plans = list_make1(result_plan);
+
+ while (--nrows > 0)
+ plans = lappend(plans, copyObject(result_plan));
+
+ result_plan = (Plan *) make_append(plans, tlist);
+ }
+ }
+ } /* end of non-minmax-aggregate case */
++=======
+ /*
+ * If ORDER BY was given, consider ways to implement that, and generate a
+ * new upperrel containing only paths that emit the correct ordering and
+ * project the correct final_target. We can apply the original
+ * limit_tuples limit in sort costing here, but only if there are no
+ * postponed SRFs.
+ */
+ if (parse->sortClause)
+ {
+ current_rel = create_ordered_paths(root,
+ current_rel,
+ final_target,
+ have_postponed_srfs ? -1.0 :
+ limit_tuples);
+ }
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
- /*
- * Since each window function could require a different sort order, we
- * stack up a WindowAgg node for each window, with sort steps between
- * them as needed.
- */
- if (activeWindows)
+ /*
+ * If there are set-returning functions in the tlist, scale up the output
+ * rowcounts of all surviving Paths to account for that. Note that if any
+ * SRFs appear in sorting or grouping columns, we'll have underestimated
+ * the numbers of rows passing through earlier steps; but that's such a
+ * weird usage that it doesn't seem worth greatly complicating matters to
+ * account for it.
+ */
+ tlist_rows = tlist_returns_set_rows(tlist);
+ if (tlist_rows > 1)
+ {
+ foreach(lc, current_rel->pathlist)
{
- List *window_tlist;
- ListCell *l;
-
- /*
- * If the top-level plan node is one that cannot do expression
- * evaluation, we must insert a Result node to project the desired
- * tlist. (In some cases this might not really be required, but
- * it's not worth trying to avoid it. In particular, think not to
- * skip adding the Result if the initial window_tlist matches the
- * top-level plan node's output, because we might change the tlist
- * inside the following loop.) Note that on second and subsequent
- * passes through the following loop, the top-level node will be a
- * WindowAgg which we know can project; so we only need to check
- * once.
- */
- if (!is_projection_capable_plan(result_plan))
- {
- result_plan = (Plan *) make_result(root,
- NIL,
- NULL,
- result_plan);
- }
+ Path *path = (Path *) lfirst(lc);
/*
- * The "base" targetlist for all steps of the windowing process is
- * a flat tlist of all Vars and Aggs needed in the result. (In
- * some cases we wouldn't need to propagate all of these all the
- * way to the top, since they might only be needed as inputs to
- * WindowFuncs. It's probably not worth trying to optimize that
- * though.) We also add window partitioning and sorting
- * expressions to the base tlist, to ensure they're computed only
- * once at the bottom of the stack (that's critical for volatile
- * functions). As we climb up the stack, we'll add outputs for
- * the WindowFuncs computed at each level.
+ * We assume that execution costs of the tlist as such were
+ * already accounted for. However, it still seems appropriate to
+ * charge something more for the executor's general costs of
+ * processing the added tuples. The cost is probably less than
+ * cpu_tuple_cost, though, so we arbitrarily use half of that.
*/
- window_tlist = make_windowInputTargetList(root,
- tlist,
- activeWindows);
+ path->total_cost += path->rows * (tlist_rows - 1) *
+ cpu_tuple_cost / 2;
++<<<<<<< HEAD
+ /*
+ * The copyObject steps here are needed to ensure that each plan
+ * node has a separately modifiable tlist. (XXX wouldn't a
+ * shallow list copy do for that?)
+ */
+ result_plan->targetlist = (List *) copyObject(window_tlist);
+#ifdef XCP
+ /*
+		 * We cannot guarantee a correct result of a windowing function
+		 * if aggregation is pushed down to the Datanodes. So if the
+		 * current plan produces a distributed result set we should bring
+		 * it to the coordinator.
+ */
+ if (distribution)
+ {
+ result_plan = (Plan *)
+ make_remotesubplan(root, result_plan, NULL,
+ distribution, current_pathkeys);
+ distribution = NULL;
+ }
+#endif
+
+ foreach(l, activeWindows)
+ {
+ WindowClause *wc = (WindowClause *) lfirst(l);
+ List *window_pathkeys;
+ int partNumCols;
+ AttrNumber *partColIdx;
+ Oid *partOperators;
+ int ordNumCols;
+ AttrNumber *ordColIdx;
+ Oid *ordOperators;
+
+ window_pathkeys = make_pathkeys_for_window(root,
+ wc,
+ tlist);
+
+ /*
+ * This is a bit tricky: we build a sort node even if we don't
+ * really have to sort. Even when no explicit sort is needed,
+ * we need to have suitable resjunk items added to the input
+ * plan's tlist for any partitioning or ordering columns that
+ * aren't plain Vars. (In theory, make_windowInputTargetList
+ * should have provided all such columns, but let's not assume
+ * that here.) Furthermore, this way we can use existing
+ * infrastructure to identify which input columns are the
+ * interesting ones.
+ */
+ if (window_pathkeys)
+ {
+ Sort *sort_plan;
+
+ sort_plan = make_sort_from_pathkeys(root,
+ result_plan,
+ window_pathkeys,
+ -1.0);
+ if (!pathkeys_contained_in(window_pathkeys,
+ current_pathkeys))
+ {
+ /* we do indeed need to sort */
+ result_plan = (Plan *) sort_plan;
+ current_pathkeys = window_pathkeys;
+ }
+#ifdef XCP
+ /*
+				 * In our code the Sort may be pushed down to the Datanodes,
+				 * and therefore sort_plan may turn out not to be a Sort
+				 * node at all. In that case we should get the sort columns
+				 * from the top RemoteSubplan.
+ */
+ if (!IsA(sort_plan, Sort))
+ {
+ RemoteSubplan *pushdown;
+ pushdown = find_push_down_plan((Plan *)sort_plan, true);
+ Assert(pushdown && pushdown->sort);
+ get_column_info_for_window(root, wc, tlist,
+ pushdown->sort->numCols,
+ pushdown->sort->sortColIdx,
+ &partNumCols,
+ &partColIdx,
+ &partOperators,
+ &ordNumCols,
+ &ordColIdx,
+ &ordOperators);
+ }
+ else
+#endif
+ /* In either case, extract the per-column information */
+ get_column_info_for_window(root, wc, tlist,
+ sort_plan->numCols,
+ sort_plan->sortColIdx,
+ &partNumCols,
+ &partColIdx,
+ &partOperators,
+ &ordNumCols,
+ &ordColIdx,
+ &ordOperators);
+ }
+ else
+ {
+ /* empty window specification, nothing to sort */
+ partNumCols = 0;
+ partColIdx = NULL;
+ partOperators = NULL;
+ ordNumCols = 0;
+ ordColIdx = NULL;
+ ordOperators = NULL;
+ }
++=======
+ path->rows *= tlist_rows;
+ }
+ /* No need to run set_cheapest; we're keeping all paths anyway. */
+ }
- if (lnext(l))
- {
- /* Add the current WindowFuncs to the running tlist */
- window_tlist = add_to_flat_tlist(window_tlist,
- wflists->windowFuncs[wc->winref]);
- }
- else
- {
- /* Install the original tlist in the topmost WindowAgg */
- window_tlist = tlist;
- }
+ /*
+ * Now we are prepared to build the final-output upperrel.
+ */
+ final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
- /* ... and make the WindowAgg plan node */
- result_plan = (Plan *)
- make_windowagg(root,
- (List *) copyObject(window_tlist),
- wflists->windowFuncs[wc->winref],
- wc->winref,
- partNumCols,
- partColIdx,
- partOperators,
- ordNumCols,
- ordColIdx,
- ordOperators,
- wc->frameOptions,
- wc->startOffset,
- wc->endOffset,
- result_plan);
- }
- }
- } /* end of if (setOperations) */
+ /*
+ * If the input rel is marked consider_parallel and there's nothing that's
+ * not parallel-safe in the LIMIT clause, then the final_rel can be marked
+ * consider_parallel as well. Note that if the query has rowMarks or is
+ * not a SELECT, consider_parallel will be false for every relation in the
+ * query.
+ */
+ if (current_rel->consider_parallel &&
+ !has_parallel_hazard(parse->limitOffset, false) &&
+ !has_parallel_hazard(parse->limitCount, false))
+ final_rel->consider_parallel = true;
+
+ /*
+ * If the current_rel belongs to a single FDW, so does the final_rel.
+ */
+ final_rel->serverid = current_rel->serverid;
+ final_rel->userid = current_rel->userid;
+ final_rel->useridiscurrent = current_rel->useridiscurrent;
+ final_rel->fdwroutine = current_rel->fdwroutine;
/*
- * If there is a DISTINCT clause, add the necessary node(s).
+ * Generate paths for the final_rel. Insert all surviving paths, with
+ * LockRows, Limit, and/or ModifyTable steps added if needed.
*/
- if (parse->distinctClause)
+ foreach(lc, current_rel->pathlist)
{
- double dNumDistinctRows;
- long numDistinctRows;
+ Path *path = (Path *) lfirst(lc);
/*
- * If there was grouping or aggregation, use the current number of
- * rows as the estimated number of DISTINCT rows (ie, assume the
- * result was already mostly unique). If not, use the number of
- * distinct-groups calculated previously.
+ * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
+ * (Note: we intentionally test parse->rowMarks not root->rowMarks
+ * here. If there are only non-locking rowmarks, they should be
+ * handled by the ModifyTable node instead. However, root->rowMarks
+ * is what goes into the LockRows node.)
*/
- if (parse->groupClause || parse->groupingSets || root->hasHavingQual || parse->hasAggs)
- dNumDistinctRows = result_plan->plan_rows;
- else
- dNumDistinctRows = dNumGroups;
-
- /* Also convert to long int --- but 'ware overflow! */
- numDistinctRows = (long) Min(dNumDistinctRows, (double) LONG_MAX);
-
- /* Choose implementation method if we didn't already */
- if (!tested_hashed_distinct)
+ if (parse->rowMarks)
{
- /*
- * At this point, either hashed or sorted grouping will have to
- * work from result_plan, so we pass that as both "cheapest" and
- * "sorted".
- */
- use_hashed_distinct =
- choose_hashed_distinct(root,
- tuple_fraction, limit_tuples,
- result_plan->plan_rows,
- result_plan->plan_width,
- result_plan->startup_cost,
- result_plan->total_cost,
- result_plan->startup_cost,
- result_plan->total_cost,
- current_pathkeys,
- dNumDistinctRows);
+ path = (Path *) create_lockrows_path(root, final_rel, path,
+ root->rowMarks,
+ SS_assign_special_param(root));
}
- if (use_hashed_distinct)
+ /*
+ * If there is a LIMIT/OFFSET clause, add the LIMIT node.
+ */
+ if (limit_needed(parse))
{
++<<<<<<< HEAD
+#ifdef XCP
+ result_plan = grouping_distribution(root, result_plan,
+ list_length(parse->distinctClause),
+ extract_grouping_cols(parse->distinctClause,
+ result_plan->targetlist),
+ current_pathkeys,
+ &distribution);
+#endif
+ /* Hashed aggregate plan --- no sort needed */
+ result_plan = (Plan *) make_agg(root,
+ result_plan->targetlist,
+ NIL,
+ AGG_HASHED,
+ NULL,
+ list_length(parse->distinctClause),
+ extract_grouping_cols(parse->distinctClause,
+ result_plan->targetlist),
+ extract_grouping_ops(parse->distinctClause),
+ NIL,
+ numDistinctRows,
+ result_plan);
+ /* Hashed aggregation produces randomly-ordered results */
+ current_pathkeys = NIL;
++=======
+ path = (Path *) create_limit_path(root, final_rel, path,
+ parse->limitOffset,
+ parse->limitCount,
+ offset_est, count_est);
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
}
- else
+
+ /*
+ * If this is an INSERT/UPDATE/DELETE, and we're not being called from
+ * inheritance_planner, add the ModifyTable node.
+ */
+ if (parse->commandType != CMD_SELECT && !inheritance_update)
{
+ List *withCheckOptionLists;
+ List *returningLists;
+ List *rowMarks;
+
/*
- * Use a Unique node to implement DISTINCT. Add an explicit sort
- * if we couldn't make the path come out the way the Unique node
- * needs it. If we do have to sort, always sort by the more
- * rigorous of DISTINCT and ORDER BY, to avoid a second sort
- * below. However, for regular DISTINCT, don't sort now if we
- * don't have to --- sorting afterwards will likely be cheaper,
- * and also has the possibility of optimizing via LIMIT. But for
- * DISTINCT ON, we *must* force the final sort now, else it won't
- * have the desired behavior.
+ * Set up the WITH CHECK OPTION and RETURNING lists-of-lists, if
+ * needed.
*/
- List *needed_pathkeys;
-
- if (parse->hasDistinctOn &&
- list_length(root->distinct_pathkeys) <
- list_length(root->sort_pathkeys))
- needed_pathkeys = root->sort_pathkeys;
+ if (parse->withCheckOptions)
+ withCheckOptionLists = list_make1(parse->withCheckOptions);
else
- needed_pathkeys = root->distinct_pathkeys;
-
- if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
- {
- if (list_length(root->distinct_pathkeys) >=
- list_length(root->sort_pathkeys))
- current_pathkeys = root->distinct_pathkeys;
- else
- {
- current_pathkeys = root->sort_pathkeys;
- /* Assert checks that parser didn't mess up... */
- Assert(pathkeys_contained_in(root->distinct_pathkeys,
- current_pathkeys));
- }
+ withCheckOptionLists = NIL;
- result_plan = (Plan *) make_sort_from_pathkeys(root,
- result_plan,
- current_pathkeys,
- -1.0);
- }
+ if (parse->returningList)
+ returningLists = list_make1(parse->returningList);
+ else
+ returningLists = NIL;
-
- /*
- * If ORDER BY was given and we were not able to make the plan come out in
- * the right order, add an explicit sort step.
- */
- if (parse->sortClause)
- {
- if (!pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
- {
- result_plan = (Plan *) make_sort_from_pathkeys(root,
- result_plan,
- root->sort_pathkeys,
- limit_tuples);
- current_pathkeys = root->sort_pathkeys;
++<<<<<<< HEAD
+#ifdef XCP
+ result_plan = grouping_distribution(root, result_plan,
+ list_length(parse->distinctClause),
+ extract_grouping_cols(parse->distinctClause,
+ result_plan->targetlist),
+ current_pathkeys,
+ &distribution);
+#endif
+ result_plan = (Plan *) make_unique(result_plan,
+ parse->distinctClause);
+ result_plan->plan_rows = dNumDistinctRows;
+ /* The Unique node won't change sort ordering */
+ }
+ }
++=======
+ /*
+ * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
+ * will have dealt with fetching non-locked marked rows, else we
+ * need to have ModifyTable do that.
+ */
+ if (parse->rowMarks)
+ rowMarks = NIL;
+ else
+ rowMarks = root->rowMarks;
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
+
+ path = (Path *)
+ create_modifytable_path(root, final_rel,
+ parse->commandType,
+ parse->canSetTag,
+ parse->resultRelation,
+ list_make1_int(parse->resultRelation),
+ list_make1(path),
+ list_make1(root),
+ withCheckOptionLists,
+ returningLists,
+ rowMarks,
+ parse->onConflict,
+ SS_assign_special_param(root));
}
- }
-
- /*
- * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
- * (Note: we intentionally test parse->rowMarks not root->rowMarks here.
- * If there are only non-locking rowmarks, they should be handled by the
- * ModifyTable node instead.)
- */
- if (parse->rowMarks)
- {
- result_plan = (Plan *) make_lockrows(result_plan,
- root->rowMarks,
- SS_assign_special_param(root));
- /*
- * The result can no longer be assumed sorted, since locking might
- * cause the sort key columns to be replaced with new values.
- */
- current_pathkeys = NIL;
+ /* And shove it into final_rel */
+ add_path(final_rel, path);
}
/*
- * Finally, if there is a LIMIT/OFFSET clause, add the LIMIT node.
+ * If there is an FDW that's responsible for all baserels of the query,
+ * let it consider adding ForeignPaths.
*/
-
- /*
- * Return the actual output ordering in query_pathkeys for possible use by
- * an outer query level.
- */
- root->query_pathkeys = current_pathkeys;
-
++<<<<<<< HEAD
+ if (limit_needed(parse))
+ {
+#ifdef XCP
+ /* We should put Limit on top of distributed results */
+ if (distribution)
+ {
+ result_plan = (Plan *)
+ make_remotesubplan(root, result_plan, NULL,
+ distribution, current_pathkeys);
+ distribution = NULL;
+ }
+#endif
+ result_plan = (Plan *) make_limit(result_plan,
+ parse->limitOffset,
+ parse->limitCount,
+ offset_est,
+ count_est);
+ }
++=======
+ if (final_rel->fdwroutine &&
+ final_rel->fdwroutine->GetForeignUpperPaths)
+ final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
+ current_rel, final_rel);
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
+
+ /* Let extensions possibly add some more paths */
+ if (create_upper_paths_hook)
+ (*create_upper_paths_hook) (root, UPPERREL_FINAL,
+ current_rel, final_rel);
+
++<<<<<<< HEAD
+#ifdef XCP
+ /*
+ * Adjust query distribution if requested
+ */
+ if (root->distribution)
+ {
+ if (equal_distributions(root, root->distribution, distribution))
+ {
+ if (IsLocatorReplicated(distribution->distributionType) &&
+ contain_volatile_functions((Node *) result_plan->targetlist))
+ ereport(ERROR,
+ (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
+ errmsg("can not update replicated table with result of volatile function")));
+ /*
+			 * The source tuple will be consumed on the same node where it
+			 * is produced, so if it is known that some node does not yield
+			 * tuples we do not want to send the subquery for execution on
+			 * those nodes at all.
+			 * So copy the restriction to the external distribution.
+			 * XXX Is that ever possible if the external restriction is
+			 * already defined? If yes, we should probably use the
+			 * intersection of the sets, and if the resulting set is empty,
+			 * create a dummy plan and set it as the result_plan. Need to
+			 * think this over.
+ */
+ root->distribution->restrictNodes =
+ bms_copy(distribution->restrictNodes);
+ }
+ else
+ {
+ RemoteSubplan *distributePlan;
+ /*
+			 * If the planned statement is either UPDATE or DELETE, different
+			 * distributions here mean the ModifyTable node will be placed on
+			 * top of a RemoteSubquery. The UPDATE and DELETE versions of
+			 * ModifyTable use the TID of the incoming tuple to apply the
+			 * changes, but the RemoteSubquery node supplies RemoteTuples,
+			 * which have no such field. Therefore we cannot execute such a
+			 * plan.
+			 * The most common case is an UPDATE statement that modifies the
+			 * distribution column. An incorrect distributed plan is also
+			 * possible when planning a complex UPDATE or DELETE statement
+			 * involving a table join.
+			 * We output different error messages in the UPDATE and DELETE
+			 * cases mostly for compatibility with Postgres-XC. It is hard to
+			 * determine here whether such a plan is due to an updated
+			 * partitioning key or to a poorly planned join, so for UPDATE we
+			 * assume the first cause as more probable, while for DELETE only
+			 * the second is possible. The error message may be misleading if
+			 * it is an UPDATE with a join, but we hope to tackle the
+			 * distributed update problem soon.
+			 * There are two ways of fixing that:
+			 * 1. Improve the distribution planner to never consider
+			 * redistributing the target table. Then, if the planner finds it
+			 * has no choice, it would throw an error somewhere else, and here
+			 * we would only be catching cases of updated distribution columns.
+			 * 2. Modify the executor to allow distribution column updates.
+			 * However, there are many issues behind the scenes in
+			 * implementing that approach.
+ */
+ if (parse->commandType == CMD_UPDATE)
+ ereport(ERROR,
+ (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
+ errmsg("could not plan this distributed update"),
+ errdetail("correlated UPDATE or updating distribution column currently not supported in Postgres-XL.")));
+ if (parse->commandType == CMD_DELETE)
+ ereport(ERROR,
+ (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
+ errmsg("could not plan this distributed delete"),
+ errdetail("correlated or complex DELETE is currently not supported in Postgres-XL.")));
+
+ /*
+ * Redistribute result according to requested distribution.
+ */
+ if ((distributePlan = find_push_down_plan(result_plan, true)))
+ {
+ Bitmapset *tmpset;
+ int nodenum;
+
+ distributePlan->distributionType = root->distribution->distributionType;
+ distributePlan->distributionKey = InvalidAttrNumber;
+ if (root->distribution->distributionExpr)
+ {
+ ListCell *lc;
+
+ /* Find distribution expression in the target list */
+ foreach(lc, distributePlan->scan.plan.targetlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+
+ if (equal(tle->expr, root->distribution->distributionExpr))
+ {
+ distributePlan->distributionKey = tle->resno;
+ break;
+ }
+ }
+
+ if (distributePlan->distributionKey == InvalidAttrNumber)
+ {
+ Plan *lefttree = distributePlan->scan.plan.lefttree;
+ Plan *plan;
+ TargetEntry *newtle;
+
+					/* The expression was not found; we need to add a junk entry */
+ newtle = makeTargetEntry((Expr *) root->distribution->distributionExpr,
+ list_length(lefttree->targetlist) + 1,
+ NULL,
+ true);
+
+ if (is_projection_capable_plan(lefttree))
+ {
+ /* Ok to modify subplan's target list */
+ lefttree->targetlist = lappend(lefttree->targetlist,
+ newtle);
+ }
+ else
+ {
+ /* Use Result node to calculate expression */
+ List *newtlist = list_copy(lefttree->targetlist);
+ newtlist = lappend(newtlist, newtle);
+ lefttree = (Plan *) make_result(root, newtlist, NULL, lefttree);
+ distributePlan->scan.plan.lefttree = lefttree;
+ }
+ /* Update all the hierarchy */
+ for (plan = result_plan; plan != lefttree; plan = plan->lefttree)
+ plan->targetlist = lefttree->targetlist;
+ }
+ }
+ tmpset = bms_copy(root->distribution->nodes);
+ distributePlan->distributionNodes = NIL;
+ while ((nodenum = bms_first_member(tmpset)) >= 0)
+ distributePlan->distributionNodes = lappend_int(
+ distributePlan->distributionNodes, nodenum);
+ bms_free(tmpset);
+ }
+		else if (!(IsA(result_plan, Result) &&
+				   result_plan->lefttree == NULL &&
+				   ((root->distribution->distributionType == 'H' &&
+					 bms_num_members(root->distribution->restrictNodes) == 1) ||
+					(root->distribution->distributionType == 'R' &&
+					 !contain_mutable_functions((Node *) result_plan->targetlist)))))
+ result_plan = (Plan *) make_remotesubplan(root,
+ result_plan,
+ root->distribution,
+ distribution,
+ NULL);
+ }
+ }
+ else
+ {
+ /*
+ * Inform caller about distribution of the subplan
+ */
+ root->distribution = distribution;
+ }
+#endif
+
+ return result_plan;
++=======
+ /* Note: currently, we leave it to callers to do set_cheapest() */
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
}
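/*
 * Illustrative sketch, not part of the patch: the distribution-key lookup
 * above scans the target list for an entry equal() to the distribution
 * expression and records its 1-based resno, falling back to adding a junk
 * entry when no match exists.  A toy stand-in using strings in place of
 * expression trees (all names hypothetical):
 */
#include <stdio.h>
#include <string.h>

static int
find_resno(const char *const *tlist, int len, const char *expr)
{
	for (int i = 0; i < len; i++)
		if (strcmp(tlist[i], expr) == 0)
			return i + 1;		/* resnos are 1-based */
	return 0;					/* InvalidAttrNumber analogue: add a junk TLE */
}

int
main(void)
{
	const char *tlist[] = {"a", "b + 1", "hashkey"};

	printf("%d\n", find_resno(tlist, 3, "hashkey"));	/* 3 */
	printf("%d\n", find_resno(tlist, 3, "c"));			/* 0 */
	return 0;
}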
* Post-processing of a completed plan tree: fix references to subplan
* vars, compute regproc values for operators, etc
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
if (fscan->fdw_scan_tlist != NIL || fscan->scan.scanrelid == 0)
{
- /* Adjust tlist, qual, fdw_exprs to reference custom scan tuple */
+ /*
+ * Adjust tlist, qual, fdw_exprs, fdw_recheck_quals to reference
+ * foreign scan tuple
+ */
indexed_tlist *itlist = build_tlist_index(fscan->fdw_scan_tlist);
+#ifdef XCP
+ fscan->scan.plan.targetlist = (List *)
+ fix_upper_expr(root,
+ (Node *) fscan->scan.plan.targetlist,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ false);
+ fscan->scan.plan.qual = (List *)
+ fix_upper_expr(root,
+ (Node *) fscan->scan.plan.qual,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ false);
+ fscan->fdw_exprs = (List *)
+ fix_upper_expr(root,
+ (Node *) fscan->fdw_exprs,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ false);
+#else
fscan->scan.plan.targetlist = (List *)
fix_upper_expr(root,
(Node *) fscan->scan.plan.targetlist,
itlist,
INDEX_VAR,
rtoffset);
+#endif
+ fscan->fdw_recheck_quals = (List *)
+ fix_upper_expr(root,
+ (Node *) fscan->fdw_recheck_quals,
+ itlist,
+ INDEX_VAR,
+ rtoffset);
pfree(itlist);
/* fdw_scan_tlist itself just needs fix_scan_list() adjustments */
fscan->fdw_scan_tlist =
* subselect.c
* Planning routines for subselects and parameters.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
/* plan_params should not be in use in current query level */
Assert(root->plan_params == NIL);
- /*
- * Generate the plan for the subquery.
- */
- plan = subquery_planner(root->glob, subquery,
- root,
- false, tuple_fraction,
- &subroot);
+ /* Generate Paths for the subquery */
+ subroot = subquery_planner(root->glob, subquery,
+ root,
+ false, tuple_fraction);
+#ifdef XCP
+ if (subroot->distribution)
+ {
+ plan = (Plan *) make_remotesubplan(subroot,
+ plan,
+ NULL,
+ subroot->distribution,
+ subroot->query_pathkeys);
+ /*
+ * SS_finalize_plan has already been run on the subplan,
+		 * so we have to copy the parameter info to the wrapper plan node.
+ */
+ plan->extParam = bms_copy(plan->lefttree->extParam);
+ plan->allParam = bms_copy(plan->lefttree->allParam);
+ }
+#endif
/* Isolate the params needed by this specific subplan */
plan_params = root->plan_params;
Assert(root->plan_params == NIL);
/*
- * Generate the plan for the CTE query. Always plan for full
- * retrieval --- we don't have enough info to predict otherwise.
+ * Generate Paths for the CTE query. Always plan for full retrieval
+ * --- we don't have enough info to predict otherwise.
*/
- plan = subquery_planner(root->glob, subquery,
- root,
- cte->cterecursive, 0.0,
- &subroot);
+ subroot = subquery_planner(root->glob, subquery,
+ root,
+ cte->cterecursive, 0.0);
+#ifdef XCP
+ if (subroot->distribution)
+ {
+ plan = (Plan *) make_remotesubplan(subroot,
+ plan,
+ NULL,
+ subroot->distribution,
+ subroot->query_pathkeys);
+ /*
+ * SS_finalize_plan has already been run on the subplan,
+		 * so we have to copy the parameter info to the wrapper plan node.
+ */
+ plan->extParam = bms_copy(plan->lefttree->extParam);
+ plan->allParam = bms_copy(plan->lefttree->allParam);
+ }
+#endif
/*
* Since the current query level doesn't yet contain any RTEs, it
subroot->eq_classes = NIL;
subroot->append_rel_list = NIL;
subroot->rowMarks = NIL;
+ memset(subroot->upper_rels, 0, sizeof(subroot->upper_rels));
+ memset(subroot->upper_targets, 0, sizeof(subroot->upper_targets));
+ subroot->processed_tlist = NIL;
+ subroot->grouping_map = NULL;
+ subroot->minmax_aggs = NIL;
+ subroot->hasInheritedTarget = false;
subroot->hasRecursion = false;
subroot->wt_param_id = -1;
- subroot->non_recursive_plan = NULL;
+ subroot->recursiveOk = true;
+ subroot->non_recursive_path = NULL;
/* No CTEs to worry about */
Assert(subquery->cteList == NIL);
* list and row ID information needed for SELECT FOR UPDATE locking and/or
* EvalPlanQual checking.
*
- * NOTE: the rewriter's rewriteTargetListIU and rewriteTargetListUD
- * routines also do preprocessing of the targetlist. The division of labor
- * between here and there is a bit arbitrary and historical.
+ * The rewriter's rewriteTargetListIU and rewriteTargetListUD routines
+ * also do preprocessing of the targetlist. The division of labor between
+ * here and there is partially historical, but it's not entirely arbitrary.
+ * In particular, consider an UPDATE across an inheritance tree. What the
+ * rewriter does need be done only once (because it depends only on the
+ * properties of the parent relation). What's done here has to be done over
+ * again for each child relation, because it depends on the column list of
+ * the child, which might have more columns and/or a different column order
+ * than the parent.
*
+ * The fact that rewriteTargetListIU sorts non-resjunk tlist entries by column
+ * position, which expand_targetlist depends on, violates the above comment
+ * because the sorting is only valid for the parent relation. In inherited
+ * UPDATE cases, adjust_inherited_tlist runs in between to take care of fixing
+ * the tlists for child tables to keep expand_targetlist happy. We do it like
+ * that because it's faster in typical non-inherited cases.
*
- *
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
#include "access/sysattr.h"
#include "catalog/pg_type.h"
#include "nodes/makefuncs.h"
+#ifdef XCP
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#endif
#include "optimizer/prep.h"
#include "optimizer/tlist.h"
+ #include "optimizer/var.h"
#include "parser/parsetree.h"
#include "parser/parse_coerce.h"
#include "utils/rel.h"
* append relations, and thenceforth share code with the UNION ALL case.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
/* plan_params should not be in use in current query level */
Assert(root->plan_params == NIL);
- /*
- * Generate plan for primitive subquery
- */
- subplan = subquery_planner(root->glob, subquery,
- root,
- false, tuple_fraction,
- &subroot);
+ /* Generate a subroot and Paths for the subquery */
+ subroot = rel->subroot = subquery_planner(root->glob, subquery,
+ root,
+ false,
+ root->tuple_fraction);
+#ifdef XCP
+ if (subroot->distribution)
+ {
+ subplan = (Plan *) make_remotesubplan(subroot,
+ subplan,
+ NULL,
+ subroot->distribution,
+ subroot->query_pathkeys);
+ }
+#endif
+
+ /* Save subroot and subplan in RelOptInfo for setrefs.c */
+ rel->subplan = subplan;
+ rel->subroot = subroot;
+
+ if (root->recursiveOk)
+ root->recursiveOk = subroot->recursiveOk;
/*
* It should not be possible for the primitive query to contain any
* pathnode.c
* Routines to manipulate pathlists and create path nodes
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
pathnode->pathtype = T_SeqScan;
pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
pathnode->param_info = get_baserel_parampathinfo(root, rel,
required_outer);
+ pathnode->parallel_aware = parallel_workers > 0 ? true : false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = parallel_workers;
pathnode->pathkeys = NIL; /* seqscan has unordered result */
+#ifdef XCP
+ set_scanpath_distribution(root, rel, pathnode);
+ if (rel->baserestrictinfo)
+ {
+ ListCell *lc;
+ foreach (lc, rel->baserestrictinfo)
+ {
+ RestrictInfo *ri = (RestrictInfo *) lfirst(lc);
+ restrict_distribution(root, ri, pathnode);
+ }
+ }
+#endif
+
cost_seqscan(pathnode, root, rel, pathnode->param_info);
return pathnode;
pathnode->pathtype = T_SampleScan;
pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
pathnode->param_info = get_baserel_parampathinfo(root, rel,
required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
pathnode->pathkeys = NIL; /* samplescan has unordered result */
+#ifdef XCP
+ set_scanpath_distribution(root, rel, pathnode);
+ if (rel->baserestrictinfo)
+ {
+ ListCell *lc;
+ foreach (lc, rel->baserestrictinfo)
+ {
+ RestrictInfo *ri = (RestrictInfo *) lfirst(lc);
+ restrict_distribution(root, ri, pathnode);
+ }
+ }
+#endif
+
cost_samplescan(pathnode, root, rel, pathnode->param_info);
return pathnode;
pathnode->path.pathtype = T_Append;
pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_appendrel_parampathinfo(rel,
required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = parallel_workers;
pathnode->path.pathkeys = NIL; /* result is always considered
* unsorted */
+#ifdef XCP
+ /*
+	 * An Append path is used to implement scans of inherited tables and some
+	 * "set" operations, like UNION ALL. While all inherited tables should
+	 * have the same distribution, UNION'ed queries may have different ones.
+	 * When the paths being appended have the same distribution it is OK to
+	 * push the Append down to the data nodes; if not, perform a
+	 * "coordinator" Append.
+ */
+
+ /* Special case of the dummy relation, if the subpaths list is empty */
+ if (subpaths)
+ {
+ /* Take distribution of the first node */
+ l = list_head(subpaths);
+ subpath = (Path *) lfirst(l);
+ distribution = copyObject(subpath->distribution);
+ /*
+		 * Check the remaining subpaths: if all distributions are equal to
+		 * the first, set it as the distribution of the Append path;
+		 * otherwise make up a coordinator Append.
+ */
+ while ((l = lnext(l)))
+ {
+ subpath = (Path *) lfirst(l);
+
+ /*
+ * For Append and MergeAppend paths, we are most often dealing with
+ * different relations, appended together. So its very likely that
+ * the distribution for each relation will have a different varno.
+ * But we should be able to push down Append and MergeAppend as
+ * long as rest of the distribution information matches.
+ *
+ * equalDistribution() compares everything except the varnos
+ */
+ if (equalDistribution(distribution, subpath->distribution))
+ {
+ /*
+ * Both distribution and subpath->distribution may be NULL at
+ * this point, or they both are not null.
+ */
+ if (distribution && subpath->distribution->restrictNodes)
+ distribution->restrictNodes = bms_union(
+ distribution->restrictNodes,
+ subpath->distribution->restrictNodes);
+ }
+ else
+ {
+ break;
+ }
+ }
+ if (l)
+ {
+ List *newsubpaths = NIL;
+ foreach(l, subpaths)
+ {
+ subpath = (Path *) lfirst(l);
+ if (subpath->distribution)
+ subpath = redistribute_path(subpath, LOCATOR_TYPE_NONE,
+ NULL, NULL, NULL);
+ newsubpaths = lappend(newsubpaths, subpath);
+ }
+ subpaths = newsubpaths;
+ pathnode->path.distribution = NULL;
+ }
+ else
+ pathnode->path.distribution = distribution;
+ }
+#endif
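/*
 * Illustrative sketch, not part of the patch: the pushdown decision above
 * reduces to "all child distributions equal (ignoring varnos)".  The types
 * and helpers below are hypothetical simplifications of Distribution and
 * equalDistribution().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { char type; int key; } Dist;	/* e.g. {'H', attno} */

static bool
dist_equal(const Dist *a, const Dist *b)
{
	if (a == NULL || b == NULL)
		return a == b;			/* both NULL counts as equal */
	return a->type == b->type && a->key == b->key;
}

static bool
append_pushdown_ok(const Dist *const *child, size_t n)
{
	for (size_t i = 1; i < n; i++)
		if (!dist_equal(child[0], child[i]))
			return false;		/* mismatch => coordinator Append */
	return true;				/* safe to push the Append down */
}

int
main(void)
{
	Dist		h = {'H', 1}, h2 = {'H', 1}, r = {'R', 0};
	const Dist *same[] = {&h, &h2};
	const Dist *mixed[] = {&h, &r};

	printf("%d %d\n",
		   append_pushdown_ok(same, 2),		/* 1 */
		   append_pushdown_ok(mixed, 2));	/* 0 */
	return 0;
}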
pathnode->subpaths = subpaths;
/*
pathnode->path.pathtype = T_MergeAppend;
pathnode->path.parent = rel;
+#ifdef XCP
+ /*
+	 * It is safe to push down MergeAppend if all subpath distributions
+	 * are the same, and these distributions are Replicated or the
+	 * distribution key is the expression of the first pathkey.
+ */
+ /* Take distribution of the first node */
+ l = list_head(subpaths);
+ subpath = (Path *) lfirst(l);
+ distribution = copyObject(subpath->distribution);
+ /*
+	 * Verify whether it is safe to push down MergeAppend with this
+	 * distribution. TODO: implement the check of the second condition
+	 * (the distribution key is the first pathkey).
+ */
+ if (distribution == NULL || IsLocatorReplicated(distribution->distributionType))
+ {
+ /*
+		 * Check the remaining subpaths: if all distributions are equal to
+		 * the first, set it as the distribution of the MergeAppend path;
+		 * otherwise make up a coordinator MergeAppend.
+ */
+ while ((l = lnext(l)))
+ {
+ subpath = (Path *) lfirst(l);
+
+ /*
+ * See comments in Append path
+ */
+ if (distribution && equalDistribution(distribution, subpath->distribution))
+ {
+ if (subpath->distribution->restrictNodes)
+ distribution->restrictNodes = bms_union(
+ distribution->restrictNodes,
+ subpath->distribution->restrictNodes);
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+ if (l)
+ {
+ List *newsubpaths = NIL;
+ foreach(l, subpaths)
+ {
+ subpath = (Path *) lfirst(l);
+ if (subpath->distribution)
+ subpath = redistribute_path(subpath, LOCATOR_TYPE_NONE,
+ NULL, NULL, NULL);
+ newsubpaths = lappend(newsubpaths, subpath);
+ }
+ subpaths = newsubpaths;
+ pathnode->path.distribution = NULL;
+ }
+ else
+ pathnode->path.distribution = distribution;
+#endif
+
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_appendrel_parampathinfo(rel,
required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
pathnode->path.pathkeys = pathkeys;
pathnode->subpaths = subpaths;
return result;
}
+ /*
+ * create_gather_path
+ * Creates a path corresponding to a gather scan, returning the
+ * pathnode.
+ *
+ * 'rows' may optionally be set to override row estimates from other sources.
+ */
+ GatherPath *
+ create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+ PathTarget *target, Relids required_outer, double *rows)
+ {
+ GatherPath *pathnode = makeNode(GatherPath);
+
+ Assert(subpath->parallel_safe);
+
+ pathnode->path.pathtype = T_Gather;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = false;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->path.pathkeys = NIL; /* Gather has unordered result */
+
+ pathnode->subpath = subpath;
+ pathnode->single_copy = false;
+
+ if (pathnode->path.parallel_workers == 0)
+ {
+ pathnode->path.parallel_workers = 1;
+ pathnode->path.pathkeys = subpath->pathkeys;
+ pathnode->single_copy = true;
+ }
+
+ cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
+
+ return pathnode;
+ }
+
/*
* create_subqueryscan_path
- * Creates a path corresponding to a sequential scan of a subquery,
+ * Creates a path corresponding to a scan of a subquery,
* returning the pathnode.
*/
- Path *
+ SubqueryScanPath *
+#ifdef XCP
- create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
++create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+ List *pathkeys, Relids required_outer,
+ Distribution *distribution)
+#else
- create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
+ create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
List *pathkeys, Relids required_outer)
+#endif
{
- Path *pathnode = makeNode(Path);
+ SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
- pathnode->pathtype = T_SubqueryScan;
- pathnode->parent = rel;
- pathnode->param_info = get_baserel_parampathinfo(root, rel,
- required_outer);
- pathnode->pathkeys = pathkeys;
+#ifdef XCP
+ pathnode->distribution = distribution;
+#endif
+ pathnode->path.pathtype = T_SubqueryScan;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->path.pathkeys = pathkeys;
+ pathnode->subpath = subpath;
- cost_subqueryscan(pathnode, root, rel, pathnode->param_info);
+ cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info);
return pathnode;
}
loop_count);
}
case T_SubqueryScan:
- return create_subqueryscan_path(root, rel, path->pathkeys,
- required_outer, path->distribution);
+#ifdef XCP
- return create_subqueryscan_path(root, rel, path->pathkeys,
- required_outer);
++ {
++ SubqueryScanPath *spath = (SubqueryScanPath *) path;
++
++ return (Path *) create_subqueryscan_path(root,
++ rel,
++ spath->subpath,
++ spath->path.pathkeys,
++ required_outer,
++ path->distribution);
++ }
+#else
+ {
+ SubqueryScanPath *spath = (SubqueryScanPath *) path;
+
+ return (Path *) create_subqueryscan_path(root,
+ rel,
+ spath->subpath,
+ spath->path.pathkeys,
+ required_outer);
+ }
+#endif
default:
break;
}
* routines for accessing the system catalogs
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include "optimizer/placeholder.h"
#include "optimizer/plancat.h"
#include "optimizer/restrictinfo.h"
+ #include "optimizer/tlist.h"
#include "utils/hsearch.h"
+#ifdef PGXC
+#include "pgxc/pgxc.h"
+#endif
typedef struct JoinHashEntry
{
* contain optimizable statements, which we should transform.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/backend/parser/analyze.c
ExplainStmt *stmt);
static Query *transformCreateTableAsStmt(ParseState *pstate,
CreateTableAsStmt *stmt);
+#ifdef PGXC
+static Query *transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt);
+#endif
+
static void transformLockingClause(ParseState *pstate, Query *qry,
LockingClause *lc, bool pushedDown);
+ #ifdef RAW_EXPRESSION_COVERAGE_TEST
+ static bool test_raw_expression_coverage(Node *node, void *context);
+ #endif
+#ifdef XCP
+static void ParseAnalyze_rtable_walk(List *rtable);
+static void ParseAnalyze_substitute_func(FuncExpr *funcexpr);
+#endif
/*
* parse_analyze
qry->rowMarks = lappend(qry->rowMarks, rc);
}
+#ifdef XCP
+post_parse_analyze_hook_type prev_ParseAnalyze_callback;
+
+/*
+ * Check if the query contains references to any pg_catalog tables that should
+ * be remapped to storm_catalog. The list is obtained from the
+ * storm_catalog_remap_string GUC. Also, do this only for normal users.
+ */
+void
+ParseAnalyze_callback(ParseState *pstate, Query *query)
+{
+ if (prev_ParseAnalyze_callback)
+ prev_ParseAnalyze_callback(pstate, query);
+
+ if (query && query->commandType == CMD_UTILITY)
+ return;
+
+ ParseAnalyze_rtable_walk(query->rtable);
+}
+
+static void
+ParseAnalyze_rtable_walk(List *rtable)
+{
+ ListCell *item;
+
+ if (!IsUnderPostmaster || superuser())
+ return;
+
+ foreach(item, rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(item);
+
+ if (rte->rtekind == RTE_FUNCTION)
+ {
+ ListCell *lc;
+ foreach(lc, rte->functions)
+ {
+ RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
+ ParseAnalyze_substitute_func((FuncExpr *) rtfunc->funcexpr);
+ }
+ }
+ else if (rte->rtekind == RTE_SUBQUERY) /* recurse for subqueries */
+ ParseAnalyze_rtable_walk(rte->subquery->rtable);
+ }
+}
+
+static void
+ParseAnalyze_substitute_func(FuncExpr *funcexpr)
+{
+ StringInfoData buf;
+ initStringInfo(&buf);
+
+ if (get_func_namespace(funcexpr->funcid) == PG_CATALOG_NAMESPACE)
+ {
+ Oid funcid = InvalidOid;
+ const char *funcname = get_func_name(funcexpr->funcid);
+
+ /* Check if the funcname is in storm_catalog_remap_string */
+ appendStringInfoString(&buf, funcname);
+ appendStringInfoChar(&buf, ',');
+
+ elog(DEBUG2, "the constructed name is %s", buf.data);
+
+ /*
+		 * The unqualified function name should be resolved from the
+		 * storm_catalog appropriately. Just emit a warning for now if
+		 * it is not.
+ */
+ if (strstr(storm_catalog_remap_string, buf.data))
+ {
+ Oid *argtypes = NULL;
+ int nargs;
+
+ get_func_signature(funcexpr->funcid, &argtypes, &nargs);
+ funcid = get_funcid(funcname, buildoidvector(argtypes, nargs),
+ STORM_CATALOG_NAMESPACE);
+ }
+ else
+ return;
+
+ if (get_func_namespace(funcid) != STORM_CATALOG_NAMESPACE)
+ ereport(WARNING,
+ (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+ errmsg("Entry (%s) present in storm_catalog_remap_string "
+ "but object not picked from STORM_CATALOG", funcname)));
+ else /* change the funcid to the storm_catalog one */
+ funcexpr->funcid = funcid;
+ }
+}
+#endif
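/*
 * Illustrative sketch, not part of the patch: the membership test above
 * appends a trailing comma to the function name and uses strstr() against
 * the comma-separated GUC value.  Note that a plain substring search can
 * false-match on name suffixes, as this toy demonstrates (the GUC value
 * here is invented for the example):
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *remap = "pg_stat_activity,myfunc,";
	char		needle[64];

	snprintf(needle, sizeof(needle), "%s,", "func");
	/* matches "myfunc," even though "func" is not in the list */
	printf("match: %s\n", strstr(remap, needle) ? "yes" : "no");
	return 0;
}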
++
+ /*
+ * Coverage testing for raw_expression_tree_walker().
+ *
+ * When enabled, we run raw_expression_tree_walker() over every DML statement
+ * submitted to parse analysis. Without this provision, that function is only
+ * applied in limited cases involving CTEs, and we don't really want to have
+ * to test everything inside as well as outside a CTE.
+ */
+ #ifdef RAW_EXPRESSION_COVERAGE_TEST
+
+ static bool
+ test_raw_expression_coverage(Node *node, void *context)
+ {
+ if (node == NULL)
+ return false;
+ return raw_expression_tree_walker(node,
+ test_raw_expression_coverage,
+ context);
+ }
+
+ #endif /* RAW_EXPRESSION_COVERAGE_TEST */
* gram.y
* POSTGRESQL BISON rules/actions
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
#include "catalog/index.h"
#include "catalog/namespace.h"
+ #include "catalog/pg_am.h"
#include "catalog/pg_trigger.h"
#include "commands/defrem.h"
+#include "miscadmin.h"
#include "commands/trigger.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
DeallocateStmt PrepareStmt ExecuteStmt
DropOwnedStmt ReassignOwnedStmt
AlterTSConfigurationStmt AlterTSDictionaryStmt
- CreateMatViewStmt RefreshMatViewStmt
+ BarrierStmt PauseStmt AlterNodeStmt CreateNodeStmt DropNodeStmt
+ CreateNodeGroupStmt DropNodeGroupStmt
+ CreateMatViewStmt RefreshMatViewStmt CreateAmStmt
%type <node> select_no_parens select_with_parens select_clause
simple_select values_clause
CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR CYCLE
DATA_P DATABASE DAY_P DEALLOCATE DEC DECIMAL_P DECLARE DEFAULT DEFAULTS
- DEFERRABLE DEFERRED DEFINER DELETE_P DELIMITER DELIMITERS DESC
+ DEFERRABLE DEFERRED DEFINER DELETE_P DELIMITER DELIMITERS DEPENDS DESC
- DICTIONARY DISABLE_P DISCARD DISTINCT DO DOCUMENT_P DOMAIN_P DOUBLE_P DROP
+/* PGXC_BEGIN */
+ DICTIONARY DIRECT DISABLE_P DISCARD DISTINCT DISTKEY DISTRIBUTE DISTRIBUTED
+ DISTSTYLE DO DOCUMENT_P DOMAIN_P DOUBLE_P
+/* PGXC_END */
+ DROP
EACH ELSE ENABLE_P ENCODING ENCRYPTED END_P ENUM_P ESCAPE EVENT EXCEPT
EXCLUDE EXCLUDING EXCLUSIVE EXECUTE EXISTS EXPLAIN
LEADING LEAKPROOF LEAST LEFT LEVEL LIKE LIMIT LISTEN LOAD LOCAL
LOCALTIME LOCALTIMESTAMP LOCATION LOCK_P LOCKED LOGGED
- MAPPING MATCH MATERIALIZED MAXVALUE MINUTE_P MINVALUE MODE MONTH_P MOVE
+ MAPPING MATCH MATERIALIZED MAXVALUE METHOD MINUTE_P MINVALUE MODE MONTH_P MOVE
- NAME_P NAMES NATIONAL NATURAL NCHAR NEXT NO NONE
+ NAME_P NAMES NATIONAL NATURAL NCHAR NEXT NO NODE NONE
NOT NOTHING NOTIFY NOTNULL NOWAIT NULL_P NULLIF
NULLS_P NUMERIC
OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTION OPTIONS OR
ORDER ORDINALITY OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER
- PARSER PARTIAL PARTITION PASSING PASSWORD PAUSE PLACING PLANS POLICY POSITION
- /* PGXC_BEGIN */
- PRECEDING PRECISION PREFERRED PRESERVE PREPARE PREPARED PRIMARY
- /* PGXC_END */
- PARALLEL PARSER PARTIAL PARTITION PASSING PASSWORD PLACING PLANS POLICY
- POSITION PRECEDING PRECISION PRESERVE PREPARE PREPARED PRIMARY
++ PARALLEL PARSER PARTIAL PARTITION PASSING PASSWORD PAUSE PLACING PLANS POLICY
++ POSITION PRECEDING PRECISION PREFERRED PRESERVE PREPARE PREPARED PRIMARY
PRIOR PRIVILEGES PROCEDURAL PROCEDURE PROGRAM
QUOTE
| AlterForeignTableStmt
| AlterFunctionStmt
| AlterGroupStmt
+ | AlterNodeStmt
+ | AlterObjectDependsStmt
| AlterObjectSchemaStmt
| AlterOwnerStmt
+ | AlterOperatorStmt
| AlterPolicyStmt
| AlterSeqStmt
| AlterSystemStmt
| DELETE_P
| DELIMITER
| DELIMITERS
+ | DEPENDS
| DICTIONARY
+ | DIRECT
| DISABLE_P
| DISCARD
+/* PGXC_BEGIN */
+ | DISTKEY
+ | DISTRIBUTE
+ | DISTRIBUTED
+ | DISTSTYLE
+/* PGXC_END */
| DOCUMENT_P
| DOMAIN_P
| DOUBLE_P
* parse_agg.c
* handle aggregates and window functions in parser
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* For an ordered-set aggregate, remember that agg_input_types describes
* the direct arguments followed by the aggregated arguments.
*
- * transfn_oid, invtransfn_oid and finalfn_oid identify the funcs to be
- * called; the latter two may be InvalidOid.
+ * transfn_oid and invtransfn_oid identify the funcs to be called; the
+ * latter may be InvalidOid, however if invtransfn_oid is set then
+ * transfn_oid must also be set.
*
* Pointers to the constructed trees are returned into *transfnexpr,
- * *invtransfnexpr and *finalfnexpr. If there is no invtransfn or finalfn,
- * the respective pointers are set to NULL. Since use of the invtransfn is
- * optional, NULL may be passed for invtransfnexpr.
+ * *invtransfnexpr and, for XL, *collectfnexpr. If there is no invtransfn
+ * or collectfn, the respective pointer is set to NULL. Since use of the
+ * invtransfn is optional, NULL may be passed for invtransfnexpr.
*/
void
- build_aggregate_fnexprs(Oid *agg_input_types,
- int agg_num_inputs,
- int agg_num_direct_inputs,
- int num_finalfn_inputs,
- bool agg_variadic,
- Oid agg_state_type,
- #ifdef XCP
- Oid agg_collect_type,
- #endif
- Oid agg_result_type,
- Oid agg_input_collation,
- Oid transfn_oid,
- #ifdef XCP
- Oid collectfn_oid,
- #endif
- Oid invtransfn_oid,
- Oid finalfn_oid,
- Expr **transfnexpr,
- Expr **invtransfnexpr,
- #ifdef XCP
- Expr **collectfnexpr,
- #endif
- Expr **finalfnexpr)
+ build_aggregate_transfn_expr(Oid *agg_input_types,
+ int agg_num_inputs,
+ int agg_num_direct_inputs,
+ bool agg_variadic,
+ Oid agg_state_type,
++ Oid agg_collect_type,
+ Oid agg_input_collation,
+ Oid transfn_oid,
++ Oid collectfn_oid,
+ Oid invtransfn_oid,
+ Expr **transfnexpr,
- Expr **invtransfnexpr)
++ Expr **invtransfnexpr,
++ Expr **collectfnexpr)
{
- Param *argp;
List *args;
FuncExpr *fexpr;
int i;
else
*invtransfnexpr = NULL;
}
-
+#ifdef XCP
+ /* see if we have a collect function */
+ if (OidIsValid(collectfn_oid))
+ {
+ Param *argp;
+ Param *argp2;
+ /*
+ * Build expr tree for collect function
+ */
+ argp = makeNode(Param);
+ argp->paramkind = PARAM_EXEC;
+ argp->paramid = -1;
+ argp->paramtype = agg_collect_type;
+ argp->paramtypmod = -1;
+ argp->location = -1;
+
+ argp2 = makeNode(Param);
+ argp2->paramkind = PARAM_EXEC;
+ argp2->paramid = -1;
+ argp2->paramtype = agg_state_type;
+ argp2->paramtypmod = -1;
+ argp2->location = -1;
+ args = list_make2(argp, argp2);
+
+ *collectfnexpr = (Expr *) makeFuncExpr(collectfn_oid,
+ agg_collect_type,
+ args,
+ InvalidOid,
+ agg_input_collation,
+ COERCE_EXPLICIT_CALL);
+ }
+ else
+ *collectfnexpr = NULL;
+#endif
+
+ /*
+ * Note: unlike the old combined builder, the final function is now
+ * handled separately by build_aggregate_finalfn_expr(), so we are done.
+ */
+ }
+
+ /*
+ * Like build_aggregate_transfn_expr, but creates an expression tree for the
+ * combine function of an aggregate, rather than the transition function.
+ */
+ void
+ build_aggregate_combinefn_expr(Oid agg_state_type,
+ Oid agg_input_collation,
+ Oid combinefn_oid,
+ Expr **combinefnexpr)
+ {
+ Node *argp;
+ List *args;
+ FuncExpr *fexpr;
+
+ /* combinefn takes two arguments of the aggregate state type */
+ argp = make_agg_arg(agg_state_type, agg_input_collation);
+
+ args = list_make2(argp, argp);
+
+ fexpr = makeFuncExpr(combinefn_oid,
+ agg_state_type,
+ args,
+ InvalidOid,
+ agg_input_collation,
+ COERCE_EXPLICIT_CALL);
+ /* combinefn is currently never treated as variadic */
+ *combinefnexpr = (Expr *) fexpr;
+ }
+
+ /*
+ * Like build_aggregate_transfn_expr, but creates an expression tree for the
+ * serialization function of an aggregate.
+ */
+ void
+ build_aggregate_serialfn_expr(Oid serialfn_oid,
+ Expr **serialfnexpr)
+ {
+ List *args;
+ FuncExpr *fexpr;
+
+ /* serialfn always takes INTERNAL and returns BYTEA */
+ args = list_make1(make_agg_arg(INTERNALOID, InvalidOid));
+
+ fexpr = makeFuncExpr(serialfn_oid,
+ BYTEAOID,
+ args,
+ InvalidOid,
+ InvalidOid,
+ COERCE_EXPLICIT_CALL);
+ *serialfnexpr = (Expr *) fexpr;
+ }
+
+ /*
+ * Like build_aggregate_transfn_expr, but creates an expression tree for the
+ * deserialization function of an aggregate.
+ */
+ void
+ build_aggregate_deserialfn_expr(Oid deserialfn_oid,
+ Expr **deserialfnexpr)
+ {
+ List *args;
+ FuncExpr *fexpr;
+
+ /* deserialfn always takes BYTEA, INTERNAL and returns INTERNAL */
+ args = list_make2(make_agg_arg(BYTEAOID, InvalidOid),
+ make_agg_arg(INTERNALOID, InvalidOid));
+
+ fexpr = makeFuncExpr(deserialfn_oid,
+ INTERNALOID,
+ args,
+ InvalidOid,
+ InvalidOid,
+ COERCE_EXPLICIT_CALL);
+ *deserialfnexpr = (Expr *) fexpr;
+ }
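+
+ /*
+ * Illustrative usage sketch (the caller and variable names are
+ * assumptions): executor setup code would build these expressions once
+ * per aggregate, e.g.
+ *
+ *	Expr *serialfnexpr = NULL;
+ *	Expr *deserialfnexpr = NULL;
+ *
+ *	if (OidIsValid(serialfn_oid))
+ *		build_aggregate_serialfn_expr(serialfn_oid, &serialfnexpr);
+ *	if (OidIsValid(deserialfn_oid))
+ *		build_aggregate_deserialfn_expr(deserialfn_oid, &deserialfnexpr);
+ */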
+
+ /*
+ * Like build_aggregate_transfn_expr, but creates an expression tree for the
+ * final function of an aggregate, rather than the transition function.
+ */
+ void
+ build_aggregate_finalfn_expr(Oid *agg_input_types,
+ int num_finalfn_inputs,
+ Oid agg_state_type,
++ Oid agg_collect_type,
+ Oid agg_result_type,
+ Oid agg_input_collation,
+ Oid finalfn_oid,
+ Expr **finalfnexpr)
+ {
+ List *args;
+ int i;
/*
* Build expr tree for final function
*/
- argp = makeNode(Param);
- argp->paramkind = PARAM_EXEC;
- argp->paramid = -1;
- /*
- * When running Phase 2 of distributed aggregation we may have only
- * transient and final functions defined.
- */
+#ifdef XCP
+ /*
+ * When running Phase 2 of distributed aggregation the final function
+ * consumes the collected (not the transition) state, so use the
+ * collect type when one is defined.
+ */
+ if (OidIsValid(agg_collect_type))
+ args = list_make1(make_agg_arg(agg_collect_type, agg_input_collation));
+ else
+#endif
- argp->paramtype = agg_state_type;
- argp->paramtypmod = -1;
- argp->paramcollid = agg_input_collation;
- argp->location = -1;
- args = list_make1(argp);
+ args = list_make1(make_agg_arg(agg_state_type, agg_input_collation));
/* finalfn may take additional args, which match agg's input types */
for (i = 0; i < num_finalfn_inputs - 1; i++)
* parse_relation.c
* parser support routines dealing with relations
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* a quick copyObject() call before manipulating the query tree.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/backend/parser/parse_utilcmd.c
*
* there is a window (caused by pgstat delay) on which a worker may choose a
* table that was already vacuumed; this is a bug in the current design.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* - Add a pgstat config column to pg_database, so this
* entire thing can be enabled/disabled on a per db basis.
*
- * Copyright (c) 2001-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Copyright (c) 2001-2016, PostgreSQL Global Development Group
*
* src/backend/postmaster/pgstat.c
* ----------
* clients.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
*/
StartupPID = StartupDataBase();
Assert(StartupPID != 0);
+ StartupStatus = STARTUP_RUNNING;
pmState = PM_STARTUP;
+#ifdef PGXC /* PGXC_COORD */
+ oldcontext = MemoryContextSwitchTo(TopMemoryContext);
+
+ /*
+ * Initialize the Data Node connection pool
+ */
+ PgPoolerPID = StartPoolManager();
+
+ MemoryContextSwitchTo(oldcontext);
+#endif /* PGXC */
+
/* Some workers may be scheduled to start now */
maybe_start_bgworker();
if (PgStatPID == 0 && pmState == PM_RUN)
PgStatPID = pgstat_start();
+#ifdef PGXC
+ /* If we have lost the pooler, try to start a new one */
+ if (PgPoolerPID == 0 && pmState == PM_RUN)
+ PgPoolerPID = StartPoolManager();
+#endif /* PGXC */
+
+#ifdef XCP
+ /* If we have lost the cluster monitor, try to start a new one */
+ if (ClusterMonPID == 0 && pmState == PM_RUN)
+ ClusterMonPID = StartClusterMonitor();
+#endif
+
/* If we have lost the archiver, try to start a new one. */
if (PgArchPID == 0 && PgArchStartupAllowed())
- PgArchPID = pgarch_start();
+ PgArchPID = pgarch_start();
/* If we need to signal the autovacuum launcher, do so now */
if (avlauncher_needs_signal)
(errmsg("received SIGHUP, reloading configuration files")));
ProcessConfigFile(PGC_SIGHUP);
SignalChildren(SIGHUP);
- SignalUnconnectedWorkers(SIGHUP);
if (StartupPID != 0)
signal_child(StartupPID, SIGHUP);
+#ifdef PGXC /* PGXC_COORD */
+ if (PgPoolerPID != 0)
+ signal_child(PgPoolerPID, SIGHUP);
+#endif /* PGXC */
+#ifdef XCP
+ if (ClusterMonPID != 0)
+ signal_child(ClusterMonPID, SIGHUP);
+#endif
if (BgWriterPID != 0)
signal_child(BgWriterPID, SIGHUP);
if (CheckpointerPID != 0)
signal_child(BgWriterPID, SIGTERM);
if (WalReceiverPID != 0)
signal_child(WalReceiverPID, SIGTERM);
- SignalUnconnectedWorkers(SIGTERM);
+#ifdef XCP
+ /* and the pool manager too */
+ if (PgPoolerPID != 0)
+ signal_child(PgPoolerPID, SIGTERM);
+ /* and the cluster monitor too */
+ if (ClusterMonPID != 0)
+ signal_child(ClusterMonPID, SIGTERM);
+#endif /* XCP */
if (pmState == PM_RECOVERY)
{
+ SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER);
+
/*
- * Only startup, bgwriter, walreceiver, unconnected bgworkers,
+ * Only startup, bgwriter, walreceiver, possibly bgworkers,
* and/or checkpointer should be active in this state; we just
* signaled the first four, and we don't want to kill
* checkpointer yet.
* process.
*/
if (CountChildren(BACKEND_TYPE_NORMAL | BACKEND_TYPE_WORKER) == 0 &&
- CountUnconnectedWorkers() == 0 &&
StartupPID == 0 &&
+#ifdef PGXC
+ PgPoolerPID == 0 &&
+#endif
+#ifdef XCP
+ ClusterMonPID == 0 &&
+#endif
WalReceiverPID == 0 &&
BgWriterPID == 0 &&
(CheckpointerPID == 0 ||
{
SignalChildren(signal);
if (StartupPID != 0)
+ {
signal_child(StartupPID, signal);
+ if (signal == SIGQUIT || signal == SIGKILL)
+ StartupStatus = STARTUP_SIGNALED;
+ }
+#ifdef PGXC /* PGXC_COORD */
+ if (PgPoolerPID != 0)
+ signal_child(PgPoolerPID, SIGQUIT);
+#endif
+#ifdef XCP
+ if (ClusterMonPID != 0)
+ signal_child(ClusterMonPID, signal);
+#endif
if (BgWriterPID != 0)
signal_child(BgWriterPID, signal);
if (CheckpointerPID != 0)
case RM_SPGIST_ID:
case RM_BRIN_ID:
case RM_COMMIT_TS_ID:
+#ifdef PGXC
+ case RM_BARRIER_ID:
+#endif
case RM_REPLORIGIN_ID:
+ case RM_GENERIC_ID:
+ /* just deal with xid, and done */
+ ReorderBufferProcessXid(ctx->reorder, XLogRecGetXid(record),
+ buf.origptr);
break;
case RM_NEXT_ID:
elog(ERROR, "unexpected RM_NEXT_ID rmgr_id: %u", (RmgrIds) XLogRecGetRmid(buf.record));
#include "access/xlog_internal.h"
#include "access/xlogutils.h"
-
+ #include "access/xact.h"
#include "catalog/pg_type.h"
* bufmgr.c
* buffer manager interface routines
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* ipci.c
* POSTGRES inter-process communication initialization code.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include "storage/procsignal.h"
#include "storage/sinvaladt.h"
#include "storage/spin.h"
+#ifdef XCP
+#include "pgxc/pgxc.h"
+#include "pgxc/squeue.h"
+#include "pgxc/pause.h"
+#endif
+ #include "utils/snapmgr.h"
-
shmem_startup_hook_type shmem_startup_hook = NULL;
static Size total_addin_request = 0;
size = add_size(size, ReplicationOriginShmemSize());
size = add_size(size, WalSndShmemSize());
size = add_size(size, WalRcvShmemSize());
+#ifdef XCP
+ if (IS_PGXC_DATANODE)
+ size = add_size(size, SharedQueueShmemSize());
+ if (IS_PGXC_COORDINATOR)
+ size = add_size(size, ClusterLockShmemSize());
+ size = add_size(size, ClusterMonitorShmemSize());
+#endif
+ size = add_size(size, SnapMgrShmemSize());
size = add_size(size, BTreeShmemSize());
size = add_size(size, SyncScanShmemSize());
size = add_size(size, AsyncShmemSize());
* happen, it would tie up KnownAssignedXids indefinitely, so we protect
* ourselves by pruning the array when a valid list of running XIDs arrives.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
static TransactionId KnownAssignedXidsGetOldestXmin(void);
static void KnownAssignedXidsDisplay(int trace_level);
static void KnownAssignedXidsReset(void);
+ static inline void ProcArrayEndTransactionInternal(PGPROC *proc,
+ PGXACT *pgxact, TransactionId latestXid);
+ static void ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid);
+#ifdef XCP
+int GlobalSnapshotSource;
+#endif
+
/*
* Report shared-memory space needed by CreateSharedProcArray.
*/
* else is taking a snapshot. See discussion in
* src/backend/access/transam/README.
*/
+#ifdef PGXC
+ /*
+ * Remove this assertion. We have seen this failing because a ROLLBACK
+ * statement may get canceled by a Coordinator, leading to recursive
+ * abort of a transaction. This must be a PostgreSQL issue, highlighted
+ * by XC. See thread on hackers with subject "Canceling ROLLBACK
+ * statement"
+ */
+#else
Assert(TransactionIdIsValid(allPgXact[proc->pgprocno].xid));
+#endif
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
-
- pgxact->xid = InvalidTransactionId;
- proc->lxid = InvalidLocalTransactionId;
- pgxact->xmin = InvalidTransactionId;
- /* must be cleared with xid/xmin: */
- pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- pgxact->delayChkpt = false; /* be sure this is cleared in abort */
- proc->recoveryConflictPending = false;
-
- /* Clear the subtransaction-XID cache too while holding the lock */
- pgxact->nxids = 0;
- pgxact->overflowed = false;
-
- /* Also advance global latestCompletedXid while holding the lock */
- if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
- latestXid))
- ShmemVariableCache->latestCompletedXid = latestXid;
-
- LWLockRelease(ProcArrayLock);
+ /*
+ * If we can immediately acquire ProcArrayLock, we clear our own XID
+ * and release the lock. If not, use group XID clearing to improve
+ * efficiency.
+ */
+ if (LWLockConditionalAcquire(ProcArrayLock, LW_EXCLUSIVE))
+ {
+ ProcArrayEndTransactionInternal(proc, pgxact, latestXid);
+ LWLockRelease(ProcArrayLock);
+ }
+ else
+ ProcArrayGroupClearXid(proc, latestXid);
}
else
{
* Routines for interprocess signalling
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* lock.c
* POSTGRES primary lock mechanism
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
}
/*
- * now check again for conflicts. 'otherLocks' describes the types of
- * locks held by other processes. If one of these conflicts with the kind
- * of lock that I want, there is a conflict and I have to sleep.
+ * Locks held in conflicting modes by members of our own lock group are
+ * not real conflicts; we can subtract those out and see if we still have
+ * a conflict. This is O(N) in the number of processes holding or
+ * awaiting locks on this object. We could improve that by making the
+ * shared memory state more complex (and larger) but it doesn't seem worth
+ * it.
*/
- if (!(lockMethodTable->conflictTab[lockmode] & otherLocks))
+ procLocks = &(lock->procLocks);
+ otherproclock = (PROCLOCK *)
+ SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
+ while (otherproclock != NULL)
{
- /* no conflict. OK to get the lock */
- PROCLOCK_PRINT("LockCheckConflicts: resolved", proclock);
- return STATUS_OK;
- }
+ if (proclock != otherproclock &&
+ proclock->groupLeader == otherproclock->groupLeader &&
+ (otherproclock->holdMask & conflictMask) != 0)
+ {
+ int intersectMask = otherproclock->holdMask & conflictMask;
+
+ for (i = 1; i <= numLockModes; i++)
+ {
+ if ((intersectMask & LOCKBIT_ON(i)) != 0)
+ {
+ if (conflictsRemaining[i] <= 0)
+ elog(PANIC, "proclocks held do not match lock");
+ conflictsRemaining[i]--;
+ totalConflictsRemaining--;
+ }
+ }
+ if (totalConflictsRemaining == 0)
+ {
+ PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
+ proclock);
+ return STATUS_OK;
+ }
+ }
+ otherproclock = (PROCLOCK *)
+ SHMQueueNext(procLocks, &otherproclock->lockLink,
+ offsetof(PROCLOCK, lockLink));
+ }
- PROCLOCK_PRINT("LockCheckConflicts: conflicting", proclock);
+#ifdef XCP
+ /*
+ * So the lock is conflicting with locks held by some other backend.
+ * But the backend may belong to the same distributed session. We need to
+ * detect such cases and either allow the lock or throw error, because
+ * waiting for the lock most probably would cause deadlock.
+ */
+ LWLockAcquire(ProcArrayLock, LW_SHARED);
+ if (proc->coordPid > 0)
+ {
+ /* Count locks held by this process and friends */
+ int myHolding[MAX_LOCKMODES];
+ SHM_QUEUE *procLocks;
+ PROCLOCK *nextplock;
+
+ /* Initialize the counters */
+ for (i = 1; i <= numLockModes; i++)
+ myHolding[i] = 0;
+ otherLocks = 0;
+
+ /* Iterate over processes associated with the lock */
+ procLocks = &(lock->procLocks);
+
+ nextplock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
+ offsetof(PROCLOCK, lockLink));
+ while (nextplock)
+ {
+ PGPROC *nextproc = nextplock->tag.myProc;
+
+ if (nextproc->coordPid == proc->coordPid &&
+ nextproc->coordId == proc->coordId)
+ {
+ /*
+ * The process belongs to same distributed session, count locks
+ */
+ myLocks = nextplock->holdMask;
+ for (i = 1; i <= numLockModes; i++)
+ myHolding[i] += ((myLocks & LOCKBIT_ON(i)) ? 1 : 0);
+ }
+ /* get next proclock */
+ nextplock = (PROCLOCK *)
+ SHMQueueNext(procLocks, &nextplock->lockLink,
+ offsetof(PROCLOCK, lockLink));
+ }
+
+ /* Summarize locks held by other processes */
+ for (i = 1; i <= numLockModes; i++)
+ {
+ if (lock->granted[i] > myHolding[i])
+ otherLocks |= LOCKBIT_ON(i);
+ }
+
+ /*
+ * Recheck the conflict mask, now counting only locks held by
+ * backends outside this distributed session.
+ */
+ if (!(lockMethodTable->conflictTab[lockmode] & otherLocks))
+ {
+ LWLockRelease(ProcArrayLock);
+ /* no conflict. OK to get the lock */
+ PROCLOCK_PRINT("LockCheckConflicts: resolved as held by friend",
+ proclock);
+#ifdef LOCK_DEBUG
+ elog(LOG, "Allow lock as held by the same distributed session [%u,%u] %s",
+ lock->tag.locktag_field1, lock->tag.locktag_field2,
+ lockMethodTable->lockModeNames[lockmode]);
+#endif
+ return STATUS_OK;
+ }
+ }
+ LWLockRelease(ProcArrayLock);
+#endif
+
+ /* Nope, it's a real conflict. */
+ PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
return STATUS_FOUND;
}
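/*
 * Illustrative sketch (an assumption; the real bookkeeping happens earlier
 * in LockCheckConflicts and is outside this hunk): conflictsRemaining[] and
 * totalConflictsRemaining are expected to be seeded from the granted counts
 * of each conflicting mode, roughly:
 *
 *	conflictMask = lockMethodTable->conflictTab[lockmode];
 *	totalConflictsRemaining = 0;
 *	for (i = 1; i <= numLockModes; i++)
 *	{
 *		conflictsRemaining[i] =
 *			(conflictMask & LOCKBIT_ON(i)) ? lock->granted[i] : 0;
 *		totalConflictsRemaining += conflictsRemaining[i];
 *	}
 */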
* locking should be done with the full lock manager --- which depends on
* LWLocks to protect its shared state.
*
- * In addition to exclusive and shared modes, lightweight locks can be used
- * to wait until a variable changes value. The variable is initially set
- * when the lock is acquired with LWLockAcquireWithVar, and can be updated
+ * In addition to exclusive and shared modes, lightweight locks can be used to
+ * wait until a variable changes value. The variable is initially not set
+ * when the lock is acquired with LWLockAcquire, i.e. it remains set to the
+ * value it was set to when the lock was released last, and can be updated
* without releasing the lock by calling LWLockUpdateVar. LWLockWaitForVar
- * waits for the variable to be updated, or until the lock is free. The
- * meaning of the variable is up to the caller, the lightweight lock code
- * just assigns and compares it.
+ * waits for the variable to be updated, or until the lock is free. When
+ * releasing the lock with LWLockReleaseClearVar() the value can be set to an
+ * appropriate value for a free lock. The meaning of the variable is up to
+ * the caller, the lightweight lock code just assigns and compares it.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
#include "utils/hsearch.h"
#endif
- /* We use the ShmemLock spinlock to protect LWLockAssign */
-
+ /* We use the ShmemLock spinlock to protect LWLockCounter */
extern slock_t *ShmemLock;
#define LW_FLAG_HAS_WAITERS ((uint32) 1 << 30)
/*
- * Compute number of LWLocks to allocate in the main array.
+ * Compute number of LWLocks required by named tranches. These will be
+ * allocated in the main array.
*/
static int
- NumLWLocks(void)
+ NumLWLocksByNamedTranches(void)
{
- int numLocks;
-
- /*
- * Possibly this logic should be spread out among the affected modules,
- * the same way that shmem space estimation is done. But for now, there
- * are few enough users of LWLocks that we can get away with just keeping
- * the knowledge here.
- */
-
- /* Predefined LWLocks */
- numLocks = NUM_FIXED_LWLOCKS;
-
- /* bufmgr.c needs two for each shared buffer */
- numLocks += 2 * NBuffers;
-
- /* proc.c needs one for each backend or auxiliary process */
- numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
-
- /* clog.c needs one per CLOG buffer */
- numLocks += CLOGShmemBuffers();
-
- /* commit_ts.c needs one per CommitTs buffer */
- numLocks += CommitTsShmemBuffers();
-
- /* subtrans.c needs one per SubTrans buffer */
- numLocks += NUM_SUBTRANS_BUFFERS;
-
- /* multixact.c needs two SLRU areas */
- numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
-
- /* async.c needs one per Async buffer */
- numLocks += NUM_ASYNC_BUFFERS;
-
- /* predicate.c needs one per old serializable xid buffer */
- numLocks += NUM_OLDSERXID_BUFFERS;
+ int numLocks = 0;
+ int i;
- /* slot.c needs one for each slot */
- numLocks += max_replication_slots;
-
- /*
- * Add any requested by loadable modules; for backwards-compatibility
- * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if
- * there are no explicit requests.
- */
- lock_addin_request_allowed = false;
- numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);
+#ifdef XCP
+ /*
+ * squeue.c needs one per consumer node in each shared queue. The
+ * maximum number of consumers is MaxDataNodes - 1.
+ */
+ numLocks += NUM_SQUEUES * (MaxDataNodes-1);
+#endif
+ for (i = 0; i < NamedLWLockTrancheRequests; i++)
+ numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
return numLocks;
}
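/*
 * Illustrative usage sketch (an assumption, not part of this change): an
 * extension asks for named-tranche LWLocks at shmem-request time and looks
 * them up after startup, e.g.
 *
 *	RequestNamedLWLockTranche("my_extension", 4);
 *	...
 *	LWLockPadded *locks = GetNamedLWLockTranche("my_extension");
 *
 *	LWLockAcquire(&locks[0].lock, LW_EXCLUSIVE);
 *	...
 *	LWLockRelease(&locks[0].lock);
 */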
* proc.c
* routines to manage per-process shared memory data structure
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include "access/xact.h"
#include "miscadmin.h"
#include "postmaster/autovacuum.h"
+#ifdef PGXC
+#include "pgxc/pgxc.h"
+#include "pgxc/poolmgr.h"
+#endif
#include "replication/slot.h"
#include "replication/syncrep.h"
+ #include "storage/standby.h"
#include "storage/ipc.h"
#include "storage/lmgr.h"
#include "storage/pmsignal.h"
* support for communication destinations
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
#include "commands/createas.h"
#include "commands/matview.h"
#include "executor/functions.h"
+#ifdef XCP
+#include "executor/producerReceiver.h"
+#endif
+ #include "executor/tqueue.h"
#include "executor/tstoreReceiver.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
case DestSQLFunction:
return CreateSQLFunctionDestReceiver();
+#ifdef XCP
+ case DestProducer:
+ return CreateProducerDestReceiver();
+#endif
+
case DestTransientRel:
return CreateTransientRelDestReceiver(InvalidOid);
+
+ case DestTupleQueue:
+ return CreateTupleQueueDestReceiver(NULL);
}
/* should never get here */
case DestIntoRel:
case DestCopyOut:
case DestSQLFunction:
+ case DestProducer:
case DestTransientRel:
+ case DestTupleQueue:
break;
}
}
case DestIntoRel:
case DestCopyOut:
case DestSQLFunction:
+ case DestProducer:
case DestTransientRel:
+ case DestTupleQueue:
break;
}
}
case DestIntoRel:
case DestCopyOut:
case DestSQLFunction:
+ case DestProducer:
case DestTransientRel:
+ case DestTupleQueue:
break;
}
}
* postgres.c
* POSTGRES C Backend Interface
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
}
}
+ if (IdleInTransactionSessionTimeoutPending)
+ {
+ /* Has the timeout setting changed since last we looked? */
+ if (IdleInTransactionSessionTimeout > 0)
+ ereport(FATAL,
+ (errcode(ERRCODE_IDLE_IN_TRANSACTION_SESSION_TIMEOUT),
+ errmsg("terminating connection due to idle-in-transaction timeout")));
+ else
+ IdleInTransactionSessionTimeoutPending = false;
+ }
+
if (ParallelMessagePending)
HandleParallelMessages();
+
+ if (PoolerMessagesPending())
+ HandlePoolerMessages();
}
StringInfoData input_message;
sigjmp_buf local_sigjmp_buf;
volatile bool send_ready_for_query = true;
+ bool disable_idle_in_transaction_timeout = false;
+#ifdef PGXC /* PGXC_DATANODE */
+ /* Snapshot info */
+ TransactionId xmin;
+ TransactionId xmax;
+ int xcnt;
+ TransactionId *xip;
+ /* Timestamp info */
+ TimestampTz timestamp;
+
+ remoteConnType = REMOTE_CONN_APP;
+#endif
+
+#ifdef XCP
+ parentPGXCNode = NULL;
+ parentPGXCNodeId = -1;
+ parentPGXCNodeType = PGXC_NODE_DATANODE;
+ cluster_lock_held = false;
+ cluster_ex_lock_held = false;
+#endif /* XCP */
+
/* Initialize startup process environment if necessary. */
if (!IsUnderPostmaster)
InitStandaloneProcess(argv[0]);
* pquery.c
* POSTGRES process query command code
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
{
QueryDesc *queryDesc;
ScanDirection direction;
- uint32 nprocessed;
+ uint64 nprocessed;
+ struct rusage start_r;
+ struct timeval start_t;
+
+ if (log_executor_stats)
+ ResetUsageCommon(&start_r, &start_t);
/*
* NB: queryDesc will be NULL if we are fetching from a held cursor or a
* completed utility query; can't use it in that path.
IsA(utilityStmt, ListenStmt) ||
IsA(utilityStmt, NotifyStmt) ||
IsA(utilityStmt, UnlistenStmt) ||
+#ifdef PGXC
+ IsA(utilityStmt, PauseClusterStmt) ||
+ IsA(utilityStmt, BarrierStmt) ||
+ (IsA(utilityStmt, CheckPointStmt) && IS_PGXC_DATANODE)))
+#else
IsA(utilityStmt, CheckPointStmt)))
+#endif
{
- PushActiveSnapshot(GetTransactionSnapshot());
- active_snapshot_set = true;
+ snapshot = GetTransactionSnapshot();
+ /* If told to, register the snapshot we're using and save in portal */
+ if (setHoldSnapshot)
+ {
+ snapshot = RegisterSnapshot(snapshot);
+ portal->holdSnapshot = snapshot;
+ }
+ PushActiveSnapshot(snapshot);
+ /* PushActiveSnapshot might have copied the snapshot */
+ snapshot = GetActiveSnapshot();
}
else
- active_snapshot_set = false;
+ snapshot = NULL;
ProcessUtility(utilityStmt,
portal->sourceText,
portal->atStart = true;
portal->atEnd = false;
portal->portalPos = 0;
- portal->posOverflow = false;
}
+
+#ifdef XCP
+/*
+ * Execute the specified portal's query and distribute tuples to consumers.
+ * Returns 1 if the portal should keep producing, 0 if all consumers have
+ * enough rows buffered so that producing can pause temporarily, and -1 if
+ * the query is completed.
+ */
+int
+AdvanceProducingPortal(Portal portal, bool can_wait)
+{
+ Portal saveActivePortal;
+ ResourceOwner saveResourceOwner;
+ MemoryContext savePortalContext;
+ MemoryContext oldContext;
+ QueryDesc *queryDesc;
+ SharedQueue squeue;
+ DestReceiver *treceiver;
+ int result;
+
+ queryDesc = PortalGetQueryDesc(portal);
+ Assert(queryDesc);
+ squeue = queryDesc->squeue;
+
+ /* Make sure the portal is producing */
+ Assert(squeue && queryDesc->myindex == -1);
+ /* Make sure there is proper receiver */
+ Assert(queryDesc->dest && queryDesc->dest->mydest == DestProducer);
+
+ /*
+ * Set up global portal context pointers.
+ */
+ saveActivePortal = ActivePortal;
+ saveResourceOwner = CurrentResourceOwner;
+ savePortalContext = PortalContext;
+ PG_TRY();
+ {
+ ActivePortal = portal;
+ CurrentResourceOwner = portal->resowner;
+ PortalContext = PortalGetHeapMemory(portal);
+
+ oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
+
+ /*
+ * This is the first pass through if the hold store is not initialized
+ * yet; we need to set it up.
+ */
+ if (portal->holdStore == NULL && portal->status != PORTAL_FAILED)
+ {
+ int idx;
+ char storename[64];
+
+ PortalCreateProducerStore(portal);
+ treceiver = CreateDestReceiver(DestTuplestore);
+ SetTuplestoreDestReceiverParams(treceiver,
+ portal->holdStore,
+ portal->holdContext,
+ false);
+ SetSelfConsumerDestReceiver(queryDesc->dest, treceiver);
+ SetProducerTempMemory(queryDesc->dest, portal->tmpContext);
+ snprintf(storename, 64, "%s producer store", portal->name);
+ tuplestore_collect_stat(portal->holdStore, storename);
+ /*
+ * Tuplestore does not clear the eof flag on the active read pointer,
+ * so with a single read pointer the store stays in the EOF state once
+ * that state has been reached. We do not want this behavior, so we
+ * work around it by using a secondary read pointer.
+ * Primary read pointer (0) is active when we are writing to
+ * the tuple store, secondary read pointer is for reading, and its
+ * eof flag is cleared if a tuple is written to the store.
+ * We know the extra read pointer has index 1, so do not store it.
+ */
+ idx = tuplestore_alloc_read_pointer(portal->holdStore, 0);
+ Assert(idx == 1);
+ }
+
+ if (queryDesc->estate && !queryDesc->estate->es_finished &&
+ portal->status != PORTAL_FAILED)
+ {
+ /*
+ * If the portal's hold store has tuples available for read and
+ * all consumer queues are not empty we skip advancing the portal
+ * (pause it) to prevent buffering too many rows at the producer.
+ * NB just created portal store would not be in EOF state, but in
+ * this case consumer queues will be empty and do not allow
+ * erroneous pause. After the first call to AdvanceProducingPortal
+ * portal will try to read the hold store and EOF flag will be set
+ * correctly.
+ */
+ tuplestore_select_read_pointer(portal->holdStore, 1);
+ if (!tuplestore_ateof(portal->holdStore) &&
+ SharedQueueCanPause(squeue))
+ result = 0;
+ else
+ result = 1;
+ tuplestore_select_read_pointer(portal->holdStore, 0);
+
+ if (result)
+ {
+ /* Execute query and dispatch tuples via dest receiver */
+#define PRODUCE_TUPLES 100
+ PushActiveSnapshot(queryDesc->snapshot);
+ ExecutorRun(queryDesc, ForwardScanDirection, PRODUCE_TUPLES);
+ PopActiveSnapshot();
+
+ if (queryDesc->estate->es_processed < PRODUCE_TUPLES)
+ {
+ /*
+ * Finish the executor, but we may still have some tuples in the
+ * local storage. Keep trying to push them into the squeue, so do
+ * not remove the portal from the list of producers.
+ */
+ ExecutorFinish(queryDesc);
+ }
+ }
+ }
+
+ /* Try to dump local tuplestores */
+ if ((queryDesc->estate == NULL || queryDesc->estate->es_finished) &&
+ ProducerReceiverPushBuffers(queryDesc->dest))
+ {
+ if (can_wait && queryDesc->estate == NULL)
+ {
+ (*queryDesc->dest->rDestroy) (queryDesc->dest);
+ queryDesc->dest = NULL;
+ portal->queryDesc = NULL;
+ squeue = NULL;
+
+ removeProducingPortal(portal);
+ FreeQueryDesc(queryDesc);
+
+ /*
+ * Current context is the portal context, which is going
+ * to be deleted
+ */
+ MemoryContextSwitchTo(TopTransactionContext);
+
+ ActivePortal = saveActivePortal;
+ CurrentResourceOwner = saveResourceOwner;
+ PortalContext = savePortalContext;
+
+ if (portal->resowner)
+ {
+ bool isCommit = (portal->status != PORTAL_FAILED);
+
+ ResourceOwnerRelease(portal->resowner,
+ RESOURCE_RELEASE_BEFORE_LOCKS,
+ isCommit, false);
+ ResourceOwnerRelease(portal->resowner,
+ RESOURCE_RELEASE_LOCKS,
+ isCommit, false);
+ ResourceOwnerRelease(portal->resowner,
+ RESOURCE_RELEASE_AFTER_LOCKS,
+ isCommit, false);
+ ResourceOwnerDelete(portal->resowner);
+ }
+ portal->resowner = NULL;
+
+ /*
+ * Delete tuplestore if present. We should do this even under error
+ * conditions; since the tuplestore would have been using cross-
+ * transaction storage, its temp files need to be explicitly deleted.
+ */
+ if (portal->holdStore)
+ {
+ MemoryContext oldcontext;
+
+ oldcontext = MemoryContextSwitchTo(portal->holdContext);
+ tuplestore_end(portal->holdStore);
+ MemoryContextSwitchTo(oldcontext);
+ portal->holdStore = NULL;
+ }
+
+ /* delete tuplestore storage, if any */
+ if (portal->holdContext)
+ MemoryContextDelete(portal->holdContext);
+
+ /* release subsidiary storage */
+ MemoryContextDelete(PortalGetHeapMemory(portal));
+
+ /* release portal struct (it's in PortalMemory) */
+ pfree(portal);
+ }
+ /* report portal is not producing */
+ result = -1;
+ }
+ else
+ {
+ result = SharedQueueCanPause(queryDesc->squeue) ? 0 : 1;
+ }
+ }
+ PG_CATCH();
+ {
+ /* Uncaught error while executing portal: mark it dead */
+ portal->status = PORTAL_FAILED;
+ /*
+ * Reset producer to allow consumers to finish, so receiving node will
+ * handle the error.
+ */
+ if (squeue)
+ SharedQueueReset(squeue, -1);
+
+ /* Restore global vars and propagate error */
+ ActivePortal = saveActivePortal;
+ CurrentResourceOwner = saveResourceOwner;
+ PortalContext = savePortalContext;
+
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ MemoryContextSwitchTo(oldContext);
+
+ ActivePortal = saveActivePortal;
+ CurrentResourceOwner = saveResourceOwner;
+ PortalContext = savePortalContext;
+
+ return result;
+}
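+
+/*
+ * Illustrative sketch (an assumption): a producing backend would typically
+ * drive the portal in a loop until it is paused or done, e.g.
+ *
+ *	int rc;
+ *
+ *	while ((rc = AdvanceProducingPortal(portal, false)) == 1)
+ *		;
+ *
+ * rc == 0 means all consumers have enough buffered rows, so producing can
+ * pause; rc == -1 means the query is completed and the portal was freed.
+ */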
+
+
+/*
+ * Iterate over the producing portals, determine which are already closed,
+ * and clean them up, waiting while consumers finish their work. Closed
+ * producers must be cleaned up and their resources released before
+ * proceeding with the next request.
+ */
+void
+cleanupClosedProducers(void)
+{
+ ListCell *lc = list_head(getProducingPortals());
+ while (lc)
+ {
+ Portal p = (Portal) lfirst(lc);
+ QueryDesc *queryDesc = PortalGetQueryDesc(p);
+ SharedQueue squeue = queryDesc->squeue;
+
+ /*
+ * Fetch the next cell now, because the call below may remove the
+ * current cell from the list and invalidate its next pointer.
+ */
+ lc = lnext(lc);
+
+ /* When portal is closed executor state is not set */
+ if (queryDesc->estate == NULL)
+ {
+ /*
+ * Set up global portal context pointers.
+ */
+ Portal saveActivePortal = ActivePortal;
+ ResourceOwner saveResourceOwner = CurrentResourceOwner;
+ MemoryContext savePortalContext = PortalContext;
+
+ PG_TRY();
+ {
+ MemoryContext oldContext;
+ ActivePortal = p;
+ CurrentResourceOwner = p->resowner;
+ PortalContext = PortalGetHeapMemory(p);
+
+ oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(p));
+
+ (*queryDesc->dest->rDestroy) (queryDesc->dest);
+ queryDesc->dest = NULL;
+ p->queryDesc = NULL;
+ squeue = NULL;
+
+ removeProducingPortal(p);
+ FreeQueryDesc(queryDesc);
+
+ /*
+ * Current context is the portal context, which is going
+ * to be deleted
+ */
+ MemoryContextSwitchTo(TopTransactionContext);
+
+ ActivePortal = saveActivePortal;
+ CurrentResourceOwner = saveResourceOwner;
+ PortalContext = savePortalContext;
+
+ if (p->resowner)
+ {
+ bool isCommit = (p->status != PORTAL_FAILED);
+
+ ResourceOwnerRelease(p->resowner,
+ RESOURCE_RELEASE_BEFORE_LOCKS,
+ isCommit, false);
+ ResourceOwnerRelease(p->resowner,
+ RESOURCE_RELEASE_LOCKS,
+ isCommit, false);
+ ResourceOwnerRelease(p->resowner,
+ RESOURCE_RELEASE_AFTER_LOCKS,
+ isCommit, false);
+ ResourceOwnerDelete(p->resowner);
+ }
+ p->resowner = NULL;
+
+ /*
+ * Delete tuplestore if present. We should do this even under error
+ * conditions; since the tuplestore would have been using cross-
+ * transaction storage, its temp files need to be explicitly deleted.
+ */
+ if (p->holdStore)
+ {
+ MemoryContext oldcontext;
+
+ oldcontext = MemoryContextSwitchTo(p->holdContext);
+ tuplestore_end(p->holdStore);
+ MemoryContextSwitchTo(oldcontext);
+ p->holdStore = NULL;
+ }
+
+ /* delete tuplestore storage, if any */
+ if (p->holdContext)
+ MemoryContextDelete(p->holdContext);
+
+ /* release subsidiary storage */
+ MemoryContextDelete(PortalGetHeapMemory(p));
+
+ /* release portal struct (it's in PortalMemory) */
+ pfree(p);
+
+ MemoryContextSwitchTo(oldContext);
+ }
+ PG_CATCH();
+ {
+ /* Uncaught error while executing portal: mark it dead */
+ p->status = PORTAL_FAILED;
+ /*
+ * Reset producer to allow consumers to finish, so receiving node will
+ * handle the error.
+ */
+ if (squeue)
+ SharedQueueReset(squeue, -1);
+
+ /* Restore global vars and propagate error */
+ ActivePortal = saveActivePortal;
+ CurrentResourceOwner = saveResourceOwner;
+ PortalContext = savePortalContext;
+
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ ActivePortal = saveActivePortal;
+ CurrentResourceOwner = saveResourceOwner;
+ PortalContext = savePortalContext;
+ }
+ }
+}
+#endif
* commands. At one time acted as an interface between the Lisp and C
* systems.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
else
ExecRenameStmt(stmt);
}
+#ifdef PGXC
+ if (IS_PGXC_LOCAL_COORDINATOR)
+ {
+ RenameStmt *stmt = (RenameStmt *) parsetree;
+ RemoteQueryExecType exec_type;
+ bool is_temp = false;
+
+ /* Try to use the object relation if possible */
+ if (stmt->relation)
+ {
+ /*
+ * When a relation is given, it is possible that the object does not
+ * exist and an IF EXISTS clause was used. So we do not do any error
+ * checking here, but block access to remote nodes for this object,
+ * as it does not exist.
+ */
+ Oid relid = RangeVarGetRelid(stmt->relation, NoLock, true);
+
+ if (OidIsValid(relid))
+ exec_type = ExecUtilityFindNodes(stmt->renameType,
+ relid,
+ &is_temp);
+ else
+ exec_type = EXEC_ON_NONE;
+ }
+ else
+ {
+ exec_type = ExecUtilityFindNodes(stmt->renameType,
+ InvalidOid,
+ &is_temp);
+ }
+
+ ExecUtilityStmtOnNodes(queryString,
+ NULL,
+ sentToRemote,
+ false,
+ exec_type,
+ is_temp);
+ }
+#endif
break;
+ case T_AlterObjectDependsStmt:
+ {
+ AlterObjectDependsStmt *stmt = (AlterObjectDependsStmt *) parsetree;
+
+ if (EventTriggerSupportsObjectType(stmt->objectType))
+ ProcessUtilitySlow(parsetree, queryString,
+ context, params,
+ dest, completionTag);
+ else
+ ExecAlterObjectDependsStmt(stmt, NULL);
+ }
+ break;
+
case T_AlterObjectSchemaStmt:
{
AlterObjectSchemaStmt *stmt = (AlterObjectSchemaStmt *) parsetree;
* arrayfuncs.c
* Support functions for arrays.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* date.c
* implements DATE and TIME data types specified in SQL standard
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
*
* dbsize.c
* Database object size functions, and related inquiries
*
- * Copyright (c) 2002-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Copyright (c) 2002-2016, PostgreSQL Global Development Group
*
* IDENTIFICATION
* src/backend/utils/adt/dbsize.c
#include "utils/rel.h"
#include "utils/relfilenodemap.h"
#include "utils/relmapper.h"
+#include "utils/lsyscache.h"
#include "utils/syscache.h"
+#ifdef XCP
+#include "catalog/pg_type.h"
+#include "catalog/pgxc_node.h"
+#include "executor/executor.h"
+#include "nodes/makefuncs.h"
+#include "pgxc/execRemote.h"
+#include "utils/snapmgr.h"
+#endif
+
+#ifdef PGXC
+static Datum pgxc_database_size(Oid dbOid);
+static Datum pgxc_tablespace_size(Oid tbOid);
+static int64 pgxc_exec_sizefunc(Oid relOid, char *funcname, char *extra_arg);
+/*
+ * The macro below is important when the object size functions are called
+ * for system catalog tables. For pg_catalog tables and other Coordinator-only
+ * tables, we should return the data from Coordinator. If we don't find
+ * locator info, that means it is a Coordinator-only table.
+ */
+#define COLLECT_FROM_DATANODES(relid) \
+ (IS_PGXC_LOCAL_COORDINATOR && \
+ (GetRelationLocInfo((relid)) != NULL))
+#endif
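+
+/*
+ * Illustrative usage sketch (an assumption): a size function would branch
+ * on the macro above before computing a purely local answer, e.g.
+ *
+ *	if (COLLECT_FROM_DATANODES(relid))
+ *		PG_RETURN_INT64(pgxc_exec_sizefunc(relid, "pg_relation_size", NULL));
+ */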
+ /* Divide by two and round towards positive infinity. */
+ #define half_rounded(x) (((x) + ((x) < 0 ? 0 : 1)) / 2)
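+ /*
+ * For example, half_rounded(5) == 3 and half_rounded(-5) == -2; exact
+ * halves round towards positive infinity.
+ */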
/* Return physical size of directory contents, or 0 if dir doesn't exist */
static int64
break;
case RELPERSISTENCE_TEMP:
if (isTempOrTempToastNamespace(relform->relnamespace))
- backend = MyBackendId;
+#ifdef XCP
+ backend = OidIsValid(MyCoordId) ? InvalidBackendId : MyBackendId;
+#else
+ backend = BackendIdForTempRelations();
+#endif
else
{
/* Do it the hard way. */
}
/* set up the accumulator on the first go round */
if (PG_ARGISNULL(0))
{
- Oid arg_type;
+ Oid arg_type;
oldcontext = MemoryContextSwitchTo(aggcontext);
state = palloc(sizeof(JsonbAggState));
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "miscadmin.h"
+#ifdef PGXC
+#include "pgxc/pgxc.h"
+#include "pgxc/pgxcnode.h"
+#include "pgxc/nodemgr.h"
+#include "executor/spi.h"
+#include "tcop/utility.h"
+#endif
#include "storage/predicate_internals.h"
+ #include "utils/array.h"
#include "utils/builtins.h"
return state;
}
+/*
+ * numeric_agg_state_in() -
+ *
+ * Input function for numeric_agg_state data type
+ */
+Datum
+numeric_agg_state_in(PG_FUNCTION_ARGS)
+{
+ char *str = pstrdup(PG_GETARG_CSTRING(0));
+ NumericAggState *state;
+ char *token;
+
+ state = (NumericAggState *) palloc0(sizeof (NumericAggState));
+ init_var(&state->sumX);
+
+ token = strtok(str, ":");
+ state->calcSumX2 = (*token == 't');
+
+ token = strtok(NULL, ":");
+ state->N = DatumGetInt64(DirectFunctionCall1(int8in,CStringGetDatum(token)));
+
+ token = strtok(NULL, ":");
+ set_var_from_str(token, token, &state->sumX);
+
+ token = strtok(NULL, ":");
+ if (state->calcSumX2)
+ {
+ init_var(&state->sumX2);
+ set_var_from_str(token, token, &state->sumX2);
+ }
+
+ token = strtok(NULL, ":");
+ state->maxScale = DatumGetInt32(DirectFunctionCall1(int4in,CStringGetDatum(token)));
+
+ token = strtok(NULL, ":");
+ state->maxScaleCount = DatumGetInt64(DirectFunctionCall1(int8in,CStringGetDatum(token)));
+
+ token = strtok(NULL, ":");
+ state->NaNcount = DatumGetInt64(DirectFunctionCall1(int8in,CStringGetDatum(token)));
+
+ pfree(str);
+
+ PG_RETURN_POINTER(state);
+}
+
+/*
+ * numeric_agg_state_out() -
+ *
+ * Output function for numeric_agg_state data type
+ */
+Datum
+numeric_agg_state_out(PG_FUNCTION_ARGS)
+{
+ NumericAggState *state = (NumericAggState *) PG_GETARG_POINTER(0);
+ char *sumX_str, *sumX2_str, *N_str,
+ *maxScale_str, *maxScaleCount_str,
+ *NaNcount_str;
+ char *result;
+ int len;
+
+ sumX_str = get_str_from_var(&state->sumX);
+ if (state->calcSumX2)
+ sumX2_str = get_str_from_var(&state->sumX2);
+ else
+ sumX2_str = "0";
+
+ N_str = DatumGetCString(DirectFunctionCall1(int8out,
+ Int64GetDatum(state->N)));
+ maxScaleCount_str = DatumGetCString(DirectFunctionCall1(int8out,
+ Int64GetDatum(state->maxScaleCount)));
+ NaNcount_str = DatumGetCString(DirectFunctionCall1(int8out,
+ Int64GetDatum(state->NaNcount)));
+ maxScale_str = DatumGetCString(DirectFunctionCall1(int4out,
+ Int32GetDatum(state->maxScale)));
+
+ len = 1 + strlen(N_str) + strlen(sumX_str) + strlen(sumX2_str) +
+ strlen(maxScale_str) + strlen(maxScaleCount_str) +
+ strlen(NaNcount_str) + 7;
+
+ result = (char *) palloc0(len);
+
+ snprintf(result, len, "%c:%s:%s:%s:%s:%s:%s",
+ state->calcSumX2 ? 't' : 'f',
+ N_str, sumX_str, sumX2_str,
+ maxScale_str, maxScaleCount_str, NaNcount_str);
+
+ pfree(N_str);
+ pfree(sumX_str);
+ if (state->calcSumX2)
+ pfree(sumX2_str);
+ pfree(maxScale_str);
+ pfree(maxScaleCount_str);
+ pfree(NaNcount_str);
+
+ PG_RETURN_CSTRING(result);
+}
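+
+/*
+ * Illustrative example (values are made up): with calcSumX2 enabled, the
+ * three inputs 1.5, 2.0 and 2.75 would serialize through the format above
+ * as something like
+ *
+ *	t:3:6.25:13.8125:2:1:0
+ *
+ * i.e. calcSumX2 : N : sumX : sumX2 : maxScale : maxScaleCount : NaNcount.
+ */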
+
+/*
+ * numeric_agg_state_recv - converts binary format to numeric_agg_state
+ */
+Datum
+numeric_agg_state_recv(PG_FUNCTION_ARGS)
+{
+ StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+ NumericAggState *state;
+ int len;
+ int i;
+
+ state = (NumericAggState *) palloc0(sizeof (NumericAggState));
+
+ state->calcSumX2 = pq_getmsgbyte(buf);
+ state->N = pq_getmsgint64(buf);
+
+ len = (uint16) pq_getmsgint(buf, sizeof(uint16));
+ if (len < 0 || len > NUMERIC_MAX_PRECISION + NUMERIC_MAX_RESULT_SCALE)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
+ errmsg("invalid length in external \"numeric\" value")));
+
+ alloc_var(&state->sumX, len);
+
+ state->sumX.weight = (int16) pq_getmsgint(buf, sizeof(int16));
+ state->sumX.sign = (uint16) pq_getmsgint(buf, sizeof(uint16));
+ if (!(state->sumX.sign == NUMERIC_POS ||
+ state->sumX.sign == NUMERIC_NEG ||
+ state->sumX.sign == NUMERIC_NAN))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
+ errmsg("invalid sign in external \"numeric\" value")));
+
+ state->sumX.dscale = (uint16) pq_getmsgint(buf, sizeof(uint16));
+ for (i = 0; i < len; i++)
+ {
+ NumericDigit d = pq_getmsgint(buf, sizeof(NumericDigit));
+
+ if (d < 0 || d >= NBASE)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
+ errmsg("invalid digit in external \"numeric\" value")));
+ state->sumX.digits[i] = d;
+ }
+
+ if (state->calcSumX2)
+ {
+ len = (uint16) pq_getmsgint(buf, sizeof(uint16));
+ if (len < 0 || len > NUMERIC_MAX_PRECISION + NUMERIC_MAX_RESULT_SCALE)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
+ errmsg("invalid length in external \"numeric\" value")));
+
+ alloc_var(&state->sumX2, len);
+
+ state->sumX2.weight = (int16) pq_getmsgint(buf, sizeof(int16));
+ state->sumX2.sign = (uint16) pq_getmsgint(buf, sizeof(uint16));
+ if (!(state->sumX2.sign == NUMERIC_POS ||
+ state->sumX2.sign == NUMERIC_NEG ||
+ state->sumX2.sign == NUMERIC_NAN))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
+ errmsg("invalid sign in external \"numeric\" value")));
+
+ state->sumX2.dscale = (uint16) pq_getmsgint(buf, sizeof(uint16));
+ for (i = 0; i < len; i++)
+ {
+ NumericDigit d = pq_getmsgint(buf, sizeof(NumericDigit));
+
+ if (d < 0 || d >= NBASE)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
+ errmsg("invalid digit in external \"numeric\" value")));
+ state->sumX2.digits[i] = d;
+ }
+ }
+ state->maxScale = pq_getmsgint(buf, sizeof (int));
+ state->maxScaleCount = pq_getmsgint64(buf);
+ state->NaNcount = pq_getmsgint64(buf);
+
+ PG_RETURN_POINTER(state);
+}
+
+/*
+ * numeric_agg_state_send - converts numeric_agg_state to binary format
+ */
+Datum
+numeric_agg_state_send(PG_FUNCTION_ARGS)
+{
+ NumericAggState *state = (NumericAggState *) PG_GETARG_POINTER(0);
+ StringInfoData buf;
+ int i;
+
+ pq_begintypsend(&buf);
+
+ pq_sendbyte(&buf, state->calcSumX2);
+ pq_sendint64(&buf, state->N);
+
+ pq_sendint(&buf, state->sumX.ndigits, sizeof(int16));
+ pq_sendint(&buf, state->sumX.weight, sizeof(int16));
+ pq_sendint(&buf, state->sumX.sign, sizeof(int16));
+ pq_sendint(&buf, state->sumX.dscale, sizeof(int16));
+ for (i = 0; i < state->sumX.ndigits; i++)
+ pq_sendint(&buf, state->sumX.digits[i], sizeof(NumericDigit));
+
+ if (state->calcSumX2)
+ {
+ pq_sendint(&buf, state->sumX2.ndigits, sizeof(int16));
+ pq_sendint(&buf, state->sumX2.weight, sizeof(int16));
+ pq_sendint(&buf, state->sumX2.sign, sizeof(int16));
+ pq_sendint(&buf, state->sumX2.dscale, sizeof(int16));
+ for (i = 0; i < state->sumX2.ndigits; i++)
+ pq_sendint(&buf, state->sumX2.digits[i], sizeof(NumericDigit));
+ }
+
+ pq_sendint(&buf, state->maxScale, sizeof (int));
+ pq_sendint(&buf, state->maxScaleCount, sizeof (int64));
+ pq_sendint(&buf, state->NaNcount, sizeof (int64));
+
+ PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
+}
+
+ /*
+ * Like makeNumericAggState(), but allocate the state in the current memory
+ * context.
+ */
+ static NumericAggState *
+ makeNumericAggStateCurrentContext(bool calcSumX2)
+ {
+ NumericAggState *state;
+
+ state = (NumericAggState *) palloc0(sizeof(NumericAggState));
+ state->calcSumX2 = calcSumX2;
+ state->agg_context = CurrentMemoryContext;
+
+ return state;
+ }
+
/*
* Accumulate a new input value for numeric aggregate functions.
*/
* we do better?)
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* plan --- consider improving this someday.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
*
* src/backend/utils/adt/ri_triggers.c
*
/* Switch to proper UID to perform check as */
GetUserIdAndSecContext(&save_userid, &save_sec_context);
SetUserIdAndSecContext(RelationGetForm(query_rel)->relowner,
- save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
+ save_sec_context | SECURITY_LOCAL_USERID_CHANGE |
+ SECURITY_NOFORCE_RLS);
/* Create the plan */
qplan = SPI_prepare(querystr, nargs, argtypes);
* Functions to convert stored expressions/querytrees back to
* source text
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include <unistd.h>
#include <fcntl.h>
+#ifdef PGXC
+#include "access/reloptions.h"
+#endif /* PGXC */
+ #include "access/amapi.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/pg_aggregate.h"
+ #include "catalog/pg_am.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_depend.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "commands/tablespace.h"
+ #include "common/keywords.h"
#include "executor/spi.h"
#include "funcapi.h"
+#ifdef PGXC
+#include "nodes/execnodes.h"
+#endif
+ #include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
heap_close(ev_relation, AccessShareLock);
}
-
+#ifdef PGXC
+/* ----------
+ * deparse_query - Parse back one query parsetree
+ *
+ * Purpose of this function is to build up statement for a RemoteQuery
+ * It just calls get_query_def without pretty print flags
+ * ----------
+ */
+void
+deparse_query(Query *query, StringInfo buf, List *parentnamespace,
+ bool finalise_aggs, bool sortgroup_colno)
+{
+ get_query_def(query, buf, parentnamespace, NULL, 0, 0, 0, finalise_aggs,
+ sortgroup_colno);
+}
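+
+/*
+ * Illustrative usage sketch (an assumption): building a remote SQL string
+ * from a Query tree might look like
+ *
+ *	StringInfoData sql;
+ *
+ *	initStringInfo(&sql);
+ *	deparse_query(query, &sql, NIL, true, false);
+ */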
+
+/* code borrowed from get_insert_query_def */
+void
+get_query_def_from_valuesList(Query *query, StringInfo buf)
+{
+ RangeTblEntry *select_rte = NULL;
+ RangeTblEntry *values_rte = NULL;
+ RangeTblEntry *rte;
+ char *sep;
+ ListCell *values_cell;
+ ListCell *l;
+ List *strippedexprs;
+ deparse_context context;
+ deparse_namespace dpns;
+
+ /*
+ * Before we begin to examine the query, acquire locks on referenced
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
+ * querytree!
+ */
+ AcquireRewriteLocks(query, false, false);
+
+ context.buf = buf;
+ context.namespaces = NIL;
+ context.windowClause = NIL;
+ context.windowTList = NIL;
+ context.varprefix = (list_length(query->rtable) != 1);
+ context.prettyFlags = 0;
+ context.indentLevel = 0;
+ context.wrapColumn = 0;
+
+ dpns.rtable = query->rtable;
+ dpns.ctes = query->cteList;
+ dpns.planstate = NULL;
+ dpns.ancestors = NIL;
+ dpns.outer_planstate = dpns.inner_planstate = NULL;
+
+ /*
+ * If it's an INSERT ... SELECT or VALUES (...), (...), ... there will be
+ * a single RTE for the SELECT or VALUES.
+ */
+ foreach(l, query->rtable)
+ {
+ rte = (RangeTblEntry *) lfirst(l);
+
+ if (rte->rtekind == RTE_SUBQUERY)
+ {
+ if (select_rte)
+ elog(ERROR, "too many subquery RTEs in INSERT");
+ select_rte = rte;
+ }
+
+ if (rte->rtekind == RTE_VALUES)
+ {
+ if (values_rte)
+ elog(ERROR, "too many values RTEs in INSERT");
+ values_rte = rte;
+ }
+ }
+ if (select_rte && values_rte)
+ elog(ERROR, "both subquery and values RTEs in INSERT");
+
+ /*
+ * Start the query with INSERT INTO relname
+ */
+ rte = rt_fetch(query->resultRelation, query->rtable);
+ Assert(rte->rtekind == RTE_RELATION);
+
+ appendStringInfo(buf, "INSERT INTO %s (",
+ generate_relation_name(rte->relid, NIL));
+
+ /*
+ * Add the insert-column-names list. To handle indirection properly, we
+ * need to look for indirection nodes in the top targetlist (if it's
+ * INSERT ... SELECT or INSERT ... single VALUES), or in the first
+ * expression list of the VALUES RTE (if it's INSERT ... multi VALUES). We
+ * assume that all the expression lists will have similar indirection in
+ * the latter case.
+ */
+ if (values_rte)
+ values_cell = list_head((List *) linitial(values_rte->values_lists));
+ else
+ values_cell = NULL;
+ strippedexprs = NIL;
+ sep = "";
+ foreach(l, query->targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+		elog(DEBUG1, "targetEntry type is %d", (int) nodeTag(tle->expr));
+		if (tle->resjunk || !IsA(tle->expr, Var))
+			continue;			/* ignore junk entries and non-Var expressions */
+
+ appendStringInfoString(buf, sep);
+ sep = ", ";
+
+ /*
+ * Put out name of target column; look in the catalogs, not at
+ * tle->resname, since resname will fail to track RENAME.
+ */
+		appendStringInfoString(buf,
+							   quote_identifier(get_relid_attribute_name(rte->relid,
+																		  tle->resno)));
+
+ /*
+ * Print any indirection needed (subfields or subscripts), and strip
+ * off the top-level nodes representing the indirection assignments.
+ */
+ if (values_cell)
+ {
+ /* we discard the stripped expression in this case */
+ processIndirection((Node *) lfirst(values_cell), &context, true);
+ values_cell = lnext(values_cell);
+ }
+ else
+ {
+ /* we keep a list of the stripped expressions in this case */
+ strippedexprs = lappend(strippedexprs, processIndirection((Node *) tle->expr, &context, true));
+ }
+ }
+ appendStringInfo(buf, ") ");
+
+ if (select_rte)
+ {
+ /* Add the SELECT */
+ get_query_def(select_rte->subquery, buf, NIL, NULL,
+ context.prettyFlags, context.wrapColumn,
+ context.indentLevel,
+ context.finalise_aggs, context.sortgroup_colno);
+ }
+ else if (values_rte)
+ {
+ /* A WITH clause is possible here */
+ get_with_clause(query, &context);
+ /* Add the multi-VALUES expression lists */
+ get_values_def(values_rte->values_lists, &context);
+ }
+ else
+ {
+ /* A WITH clause is possible here */
+ get_with_clause(query, &context);
+ /* Add the single-VALUES expression list */
+ appendContextKeyword(&context, "VALUES (",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 2);
+ get_rule_expr((Node *) strippedexprs, &context, false);
+ appendStringInfoChar(buf, ')');
+ }
+ /* Add RETURNING if present */
+ if (query->returningList)
+ {
+ appendContextKeyword(&context, " RETURNING",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_target_list(query->returningList, &context, NULL);
+ }
+}
+#endif
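+
+/*
+ * Editorial sketch, not part of the patch: a minimal caller of
+ * deparse_query() above, as it might be used when building the SQL text
+ * for a RemoteQuery. The Query pointer is assumed to come from the
+ * planner; only names visible in this file are used.
+ */
+#ifdef DEPARSE_QUERY_EXAMPLE			/* hypothetical guard */
+static char *
+deparse_query_example(Query *query)
+{
+	StringInfoData sql;
+
+	initStringInfo(&sql);
+	deparse_query(query, &sql, NIL, false, false);
+	return sql.data;				/* e.g. handed to a RemoteQuery node */
+}
+#endif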
/* ----------
* get_query_def - Parse back one query parsetree
*
StringInfo buf = context->buf;
Oid argtypes[FUNC_MAX_ARGS];
int nargs;
+#ifdef PGXC
+ bool added_finalfn = false;
+#endif /* PGXC */
+
bool use_variadic;
+ /*
+ * For a combining aggregate, we look up and deparse the corresponding
+ * partial aggregate instead. This is necessary because our input
+ * argument list has been replaced; the new argument list always has just
+ * one element, which will point to a partial Aggref that supplies us with
+ * transition states to combine.
+ */
+ if (DO_AGGSPLIT_COMBINE(aggref->aggsplit))
+ {
+ TargetEntry *tle = linitial(aggref->args);
+
+ Assert(list_length(aggref->args) == 1);
+ Assert(IsA(tle, TargetEntry));
+ resolve_special_varno((Node *) tle->expr, context, original_aggref,
+ get_agg_combine_expr);
+ return;
+ }
+
+ /*
+ * Mark as PARTIAL, if appropriate. We look to the original aggref so as
+ * to avoid printing this when recursing from the code just above.
+ */
+ if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit))
+ appendStringInfoString(buf, "PARTIAL ");
+
/* Extract the argument types as seen by the parser */
nargs = get_aggregate_argtypes(aggref, argtypes);
}
appendStringInfoChar(buf, ')');
+
+#ifdef PGXC
+ if (added_finalfn)
+ appendStringInfoChar(buf, ')');
+#endif /* PGXC */
}
+ /*
+ * This is a helper function for get_agg_expr(). It's used when we deparse
+ * a combining Aggref; resolve_special_varno locates the corresponding partial
+ * Aggref and then calls this.
+ */
+ static void
+ get_agg_combine_expr(Node *node, deparse_context *context, void *private)
+ {
+ Aggref *aggref;
+ Aggref *original_aggref = private;
+
+ if (!IsA(node, Aggref))
+ elog(ERROR, "combining Aggref does not point to an Aggref");
+
+ aggref = (Aggref *) node;
+ get_agg_expr(aggref, context, original_aggref);
+ }
+
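+/*
+ * Editorial illustration, not part of the patch: the effect of the
+ * combine/PARTIAL logic above as it surfaces in EXPLAIN VERBOSE of a
+ * parallel aggregate (plan shape assumed, not taken from a real run):
+ *
+ *   Finalize Aggregate
+ *     Output: avg(x)
+ *     ->  Gather
+ *           ->  Partial Aggregate
+ *                 Output: PARTIAL avg(x)
+ *
+ * The finalize stage recurses through its single argument to the partial
+ * Aggref, while the partial stage is prefixed with "PARTIAL".
+ */
+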
/*
* get_windowfunc_expr - Parse back a WindowFunc node
*/
* version.c
* Returns the PostgreSQL version string
*
- * Copyright (c) 1998-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Copyright (c) 1998-2016, PostgreSQL Global Development Group
*
* IDENTIFICATION
*
* problems can be overcome cheaply.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* lsyscache.c
* Convenience routines for common queries in the system catalog cache.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* be infrequent enough that more-detailed tracking is not worth the effort.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* relcache.c
* POSTGRES relation descriptor cache code
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
#include "commands/policy.h"
#include "commands/trigger.h"
#include "miscadmin.h"
+ #include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
- #include "optimizer/planmain.h"
#include "optimizer/prep.h"
#include "optimizer/var.h"
+#ifdef PGXC
+#include "pgxc/pgxc.h"
+#include "postmaster/autovacuum.h"
+#endif
#include "rewrite/rewriteDefine.h"
#include "rewrite/rowsecurity.h"
#include "storage/lmgr.h"
case RELPERSISTENCE_TEMP:
if (isTempOrTempToastNamespace(relation->rd_rel->relnamespace))
{
- relation->rd_backend = MyBackendId;
+#ifdef XCP
+				if (OidIsValid(MyCoordId))
+					relation->rd_backend = MyFirstBackendId;
+				else
+#endif
+					relation->rd_backend = BackendIdForTempRelations();
relation->rd_islocaltemp = true;
}
else
break;
case RELPERSISTENCE_TEMP:
Assert(isTempOrTempToastNamespace(relnamespace));
- rel->rd_backend = MyBackendId;
+#ifdef XCP
+ if (OidIsValid(MyCoordId))
+ rel->rd_backend = MyFirstBackendId;
+ else
+#endif
+ rel->rd_backend = BackendIdForTempRelations();
rel->rd_islocaltemp = true;
break;
default:
* syscache.c
* System cache management routines
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
*
* IDENTIFICATION
*/
/* Determine whether message is enabled for server log output */
- output_to_server = is_log_level_output(elevel, log_min_messages);
+ output_to_server = is_log_level_output(elevel,
+#ifdef USE_MODULE_MSGIDS
+ moduleid,
+ fileid,
+ msgid,
+#endif
+ log_min_messages);
/* Determine whether message is enabled for client output */
- if (whereToSendOutput == DestRemote && elevel != COMMERROR)
+ if (whereToSendOutput == DestRemote && elevel != LOG_SERVER_ONLY)
{
/*
* client_min_messages is honored only after we complete the
* test is correct for testing whether the message should go to the client.
*/
static bool
-is_log_level_output(int elevel, int log_min_level)
+is_log_level_output(int elevel,
+#ifdef USE_MODULE_MSGIDS
+ int moduleid,
+ int fileid,
+ int msgid,
+#endif
+ int log_min_level)
{
- if (elevel == LOG || elevel == COMMERROR)
+#ifdef USE_MODULE_MSGIDS
+ /*
+	 * Check whether the message's compile-time log level has been overridden
+	 * at run time.
+	 *
+	 * Currently we only support raising the log level of a message, and only
+	 * for deciding whether the message should go to the server log. A message
+	 * that would otherwise not qualify for the server log can thus be forced
+	 * to be logged.
+	 *
+	 * In the future we may also want to go the other way round, i.e. suppress
+	 * a log message, or change the severity of log messages. The latter may
+	 * be especially useful to turn specific ERROR messages into FATAL or
+	 * PANIC, to be able to get a core dump for analysis.
+ */
+ elevel = get_overridden_log_level(moduleid, fileid, msgid,
+ elevel);
+#endif
+
+ if (elevel == LOG || elevel == LOG_SERVER_ONLY)
{
if (log_min_level == LOG || log_min_level <= ERROR)
return true;
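+
+/*
+ * Editorial sketch, not part of the patch: the minimal shape of the
+ * run-time override consulted above. lookup_msg_override() is a
+ * hypothetical accessor for the per-message override table; per the
+ * comment in is_log_level_output(), only raising the compile-time level
+ * is supported.
+ */
+#ifdef MSGID_OVERRIDE_EXAMPLE			/* hypothetical guard */
+static int
+get_overridden_log_level_sketch(int moduleid, int fileid, int msgid,
+								int elevel)
+{
+	int			forced = lookup_msg_override(moduleid, fileid, msgid);
+
+	/* Only raising the severity is supported; never lower it. */
+	return (forced > elevel) ? forced : elevel;
+}
+#endif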
* globals.c
* global variable declarations
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
/* note: currently this is not valid in backend processes */
#endif
+#ifdef XCP
+Oid MyCoordId = InvalidOid;
+char MyCoordName[NAMEDATALEN];
+
+int MyCoordPid = 0;
+LocalTransactionId MyCoordLxid = 0;
+
+BackendId MyFirstBackendId = InvalidBackendId;
+#endif
+
BackendId MyBackendId = InvalidBackendId;
+ BackendId ParallelMasterBackendId = InvalidBackendId;
+
Oid MyDatabaseId = InvalidOid;
Oid MyDatabaseTableSpace = InvalidOid;
* miscinit.c
* miscellaneous initialization support stuff
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
#include <utime.h>
#endif
+#ifdef XCP
+#include "catalog/namespace.h"
+#endif
#include "access/htup_details.h"
#include "catalog/pg_authid.h"
+ #include "libpq/libpq.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
+#ifdef XCP
+#include "pgxc/execRemote.h"
+#endif
#include "postmaster/autovacuum.h"
#include "postmaster/postmaster.h"
#include "storage/fd.h"
if (lock_files == NIL)
on_proc_exit(UnlinkLockFiles, 0);
- lock_files = lappend(lock_files, pstrdup(filename));
+ /*
+ * Use lcons so that the lock files are unlinked in reverse order of
+ * creation; this is critical!
+ */
+ lock_files = lcons(pstrdup(filename), lock_files);
}
+void
+ForgetLockFiles(void)
+{
+ lock_files = NIL;
+}
+
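+/*
+ * Editorial illustration, not part of the patch: the ordering guarantee
+ * provided by lcons() above (file names hypothetical):
+ *
+ *     files = lcons(pstrdup("data.lock"), files);     created first
+ *     files = lcons(pstrdup("socket.lock"), files);   created second
+ *
+ * The list is now ("socket.lock", "data.lock"), so UnlinkLockFiles()
+ * walks it head-first and removes the lock files in reverse order of
+ * creation.
+ */
+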
/*
* Create the data directory lockfile.
*
* postinit.c
* postgres initialization utilities
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* See src/backend/utils/misc/README for more information.
*
*
- * Copyright (c) 2000-2012, PostgreSQL Global Development Group
- * Copyright (c) 2000-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Copyright (c) 2000-2016, PostgreSQL Global Development Group
*
* IDENTIFICATION
{NULL, 0, false}
};
+#ifdef PGXC
+/*
+ * Define remote connection types for PGXC
+ */
+static const struct config_enum_entry pgxc_conn_types[] = {
+ {"application", REMOTE_CONN_APP, false},
+ {"coordinator", REMOTE_CONN_COORD, false},
+ {"datanode", REMOTE_CONN_DATANODE, false},
+ {"gtm", REMOTE_CONN_GTM, false},
+ {"gtmproxy", REMOTE_CONN_GTM_PROXY, false},
+ {NULL, 0, false}
+};
+#endif
+
/*
- * Although only "on", "off", "remote_write", and "local" are documented, we
- * accept all the likely variants of "on" and "off".
+ * Although only "on", "off", "remote_apply", "remote_write", and "local" are
+ * documented, we accept all the likely variants of "on" and "off".
*/
static const struct config_enum_entry synchronous_commit_options[] = {
{"local", SYNCHRONOUS_COMMIT_LOCAL_FLUSH, false},
{NULL, 0, false}
};
-
+#ifdef XCP
+/*
+ * Set the global-snapshot source. 'gtm' is the default, but the user can
+ * choose 'coordinator' for a performance improvement at the cost of
+ * reduced consistency.
+ */
+static const struct config_enum_entry global_snapshot_source_options[] = {
+ {"gtm", GLOBAL_SNAPSHOT_SOURCE_GTM, true},
+ {"coordinator", GLOBAL_SNAPSHOT_SOURCE_COORDINATOR, true},
+ {NULL, 0, false}
+};
+#endif
+
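+/*
+ * Editorial sketch, not part of the patch: how XL code can branch on the
+ * GUC backed by this table. GlobalSnapshotSource is the variable wired up
+ * in the guc table below; the two snapshot helpers are hypothetical.
+ *
+ *     if (GlobalSnapshotSource == GLOBAL_SNAPSHOT_SOURCE_COORDINATOR)
+ *         snap = GetCoordinatorLocalSnapshot();    (hypothetical)
+ *     else
+ *         snap = GetSnapshotFromGTM();             (hypothetical)
+ */
+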
+ static const struct config_enum_entry force_parallel_mode_options[] = {
+ {"off", FORCE_PARALLEL_OFF, false},
+ {"on", FORCE_PARALLEL_ON, false},
+ {"regress", FORCE_PARALLEL_REGRESS, false},
+ {"true", FORCE_PARALLEL_ON, true},
+ {"false", FORCE_PARALLEL_OFF, true},
+ {"yes", FORCE_PARALLEL_ON, true},
+ {"no", FORCE_PARALLEL_OFF, true},
+ {"1", FORCE_PARALLEL_ON, true},
+ {"0", FORCE_PARALLEL_OFF, true},
+ {NULL, 0, false}
+ };
+
/*
* Options for enum values stored in other modules
*/
int tcp_keepalives_interval;
int tcp_keepalives_count;
+#ifdef XCP
+char *storm_catalog_remap_string;
+#endif
+ /*
+ * SSL renegotiation was removed in PostgreSQL 9.5, but we tolerate the
+ * setting being zero (meaning never renegotiate) for backward
+ * compatibility. This avoids breaking compatibility with clients that
+ * have never supported renegotiation and therefore always set it to zero.
+ */
+ int ssl_renegotiation_limit;
+
/*
* This really belongs in pg_shmem.c, but is defined here so that it doesn't
* need to be duplicated in all the different implementations of pg_shmem.c.
NULL, NULL, NULL
},
+#ifdef XCP
+ {
+ {"global_snapshot_source", PGC_USERSET, DEVELOPER_OPTIONS,
+ gettext_noop("Set preferred source of a snapshot."),
+ gettext_noop("When set to 'coordinator', a snapshot is taken at "
+ "the coordinator at the risk of reduced consistency. "
+					 "Default is 'gtm'.")
+ },
+ &GlobalSnapshotSource,
+ GLOBAL_SNAPSHOT_SOURCE_GTM, global_snapshot_source_options,
+ NULL, NULL, NULL
+ },
+#endif
+ {
+ {"force_parallel_mode", PGC_USERSET, QUERY_TUNING_OTHER,
+ gettext_noop("Forces use of parallel query facilities."),
+ gettext_noop("If possible, run query using a parallel worker and with parallel restrictions.")
+ },
+ &force_parallel_mode,
+ FORCE_PARALLEL_OFF, force_parallel_mode_options,
+ NULL, NULL, NULL
+ },
/* End-of-list marker */
{
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
-#max_prepared_transactions = 0 # zero disables the feature
+#max_prepared_transactions = 10 # zero disables the feature
# (change requires restart)
- # Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory
- # per transaction slot, plus lock space (see max_locks_per_transaction).
- # It is not advisable to set max_prepared_transactions nonzero unless you
- # actively intend to use prepared transactions.
+ # Caution: it is not advisable to set max_prepared_transactions nonzero unless
+ # you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#maintenance_work_mem = 64MB # min 1MB
+ #replacement_sort_tuples = 150000 # limits use of replacement selection sort
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
#dynamic_shared_memory_type = posix # the default is the first option
# - Asynchronous Behavior -
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
- #max_worker_processes = 8
+ #max_worker_processes = 8 # (change requires restart)
+ #max_parallel_workers_per_gather = 2 # taken from max_worker_processes
+ #old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
+ # (change requires restart)
+ #backend_flush_after = 0		# measured in pages, 0 disables
+# - Shared queues -
+
+#shared_queues = 64 # min 16
+#shared_queue_size = 64KB # min 16KB
#------------------------------------------------------------------------------
# WRITE AHEAD LOG
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
+#network_byte_cost = 0.001 # same scale as above
+#remote_query_cost = 100.0 # same scale as above
+ #parallel_tuple_cost = 0.1 # same scale as above
+ #parallel_setup_cost = 1000.0 # same scale as above
+ #min_parallel_relation_size = 8MB
#effective_cache_size = 4GB
# - Genetic Query Optimizer -
/* This is a transient link to the active portal's memory context: */
MemoryContext PortalContext = NULL;
--static void MemoryContextCallResetCallbacks(MemoryContext context);
+#ifdef PGXC
+void *allocTopCxt(size_t s);
+#endif
+ static void MemoryContextStatsInternal(MemoryContext context, int level,
+ bool print, int max_children,
+ MemoryContextCounters *totals);
/*
* You should not do memory allocations within a critical section, because
* doesn't actually run the executor for them.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
+ #include "utils/snapmgr.h"
#include "utils/timestamp.h"
+#ifdef PGXC
+#include "pgxc/pgxc.h"
+#include "access/hash.h"
+#include "catalog/pg_collation.h"
+#include "utils/formatting.h"
+#include "utils/lsyscache.h"
+#endif
+
/*
* Estimate of the maximum number of open portals a user would have,
* used in initially sizing the PortalHashTable in EnablePortalManager().
/* drop cached plan reference, if any */
PortalReleaseCachedPlan(portal);
+#ifdef XCP
+ /*
+	 * Skip the memory release if the portal is still producing, i.e. it has
+	 * tuples in local memory that it still has to push to consumers; those
+	 * tuples would be lost if the memory were freed now. The cleanup is
+	 * completed once the portal has finished producing.
+ */
+ if (portalIsProducing(portal))
+ return;
+
+ if (portal->queryDesc)
+ {
+ ResourceOwner saveResourceOwner = CurrentResourceOwner;
+ CurrentResourceOwner = portal->resowner;
+ FreeQueryDesc(portal->queryDesc);
+ CurrentResourceOwner = saveResourceOwner;
+ portal->queryDesc = NULL;
+ }
+#endif
+
+ /*
+ * If portal has a snapshot protecting its data, release that. This needs
+ * a little care since the registration will be attached to the portal's
+ * resowner; if the portal failed, we will already have released the
+ * resowner (and the snapshot) during transaction abort.
+ */
+ if (portal->holdSnapshot)
+ {
+ if (portal->resowner)
+ UnregisterSnapshotFromOwner(portal->holdSnapshot,
+ portal->resowner);
+ portal->holdSnapshot = NULL;
+ }
+
/*
* Release any resources still attached to the portal. There are several
* cases being covered here:
ResourceOwner nextchild; /* next child of same parent */
const char *name; /* name (just for debugging) */
- /* We have built-in support for remembering owned buffers */
- int nbuffers; /* number of owned buffer pins */
- Buffer *buffers; /* dynamically allocated array */
- int maxbuffers; /* currently allocated array size */
+ /* We have built-in support for remembering: */
+ ResourceArray bufferarr; /* owned buffers */
+ ResourceArray catrefarr; /* catcache references */
+ ResourceArray catlistrefarr; /* catcache-list pins */
+ ResourceArray relrefarr; /* relcache references */
+ ResourceArray planrefarr; /* plancache references */
+ ResourceArray tupdescarr; /* tupdesc references */
+ ResourceArray snapshotarr; /* snapshot references */
+ ResourceArray filearr; /* open temporary files */
+ ResourceArray dsmarr; /* dynamic shmem segments */
++ ResourceArray prepstmts; /* prepared statements */
/* We can remember up to MAX_RESOWNER_LOCKS references to local locks. */
int nlocks; /* number of owned locks */
}
/* Ditto for temporary files */
- while (owner->nfiles > 0)
+ while (ResourceArrayGetAny(&(owner->filearr), &foundres))
{
+ File res = DatumGetFile(foundres);
+
if (isCommit)
- PrintFileLeakWarning(owner->files[owner->nfiles - 1]);
- FileClose(owner->files[owner->nfiles - 1]);
+ PrintFileLeakWarning(res);
+ FileClose(res);
}
+#ifdef XCP
+	/* Ditto for prepared statements */
+	while (ResourceArrayGetAny(&(owner->prepstmts), &foundres))
+	{
+		char	   *stmt = (char *) DatumGetPointer(foundres);
+
+		if (isCommit)
+			PrintPreparedStmtLeakWarning(stmt);
+		/* DropPreparedStatement forgets the entry via the resowner */
+		DropPreparedStatement(stmt, false);
+	}
+#endif
+
/* Clean up index scans too */
ReleaseResources_hash();
}
ResourceOwnerNewParent(owner, NULL);
/* And free the object. */
- if (owner->buffers)
- pfree(owner->buffers);
- if (owner->catrefs)
- pfree(owner->catrefs);
- if (owner->catlistrefs)
- pfree(owner->catlistrefs);
- if (owner->relrefs)
- pfree(owner->relrefs);
- if (owner->planrefs)
- pfree(owner->planrefs);
- if (owner->tupdescs)
- pfree(owner->tupdescs);
- if (owner->snapshots)
- pfree(owner->snapshots);
- if (owner->files)
- pfree(owner->files);
- if (owner->dsms)
- pfree(owner->dsms);
- #ifdef XCP
- if (owner->stmts)
- pfree(owner->stmts);
- #endif
+ ResourceArrayFree(&(owner->bufferarr));
+ ResourceArrayFree(&(owner->catrefarr));
+ ResourceArrayFree(&(owner->catlistrefarr));
+ ResourceArrayFree(&(owner->relrefarr));
+ ResourceArrayFree(&(owner->planrefarr));
+ ResourceArrayFree(&(owner->tupdescarr));
+ ResourceArrayFree(&(owner->snapshotarr));
+ ResourceArrayFree(&(owner->filearr));
+ ResourceArrayFree(&(owner->dsmarr));
++ ResourceArrayFree(&(owner->prepstmts));
pfree(owner);
}
* above. Nonetheless, with large workMem we can have many tapes.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
int64 allowedMem; /* total memory allowed, in bytes */
int maxTapes; /* number of tapes (Knuth's T) */
int tapeRange; /* maxTapes-1 (Knuth's P) */
- MemoryContext sortcontext; /* memory context holding all sort data */
+ MemoryContext sortcontext; /* memory context holding most sort data */
+ MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
+#ifdef PGXC
+ ResponseCombiner *combiner; /* tuple source, alternate to tapeset */
+#endif /* PGXC */
/*
* These function pointers decouple the routines that must know what kind
void (*readtup) (Tuplesortstate *state, SortTuple *stup,
int tapenum, unsigned int len);
+#ifdef PGXC
+ /*
+	 * Function to read the length of the next stored tuple. The result is
+	 * passed as the 'len' parameter to the readtup function.
+ */
+ unsigned int (*getlen) (Tuplesortstate *state, int tapenum, bool eofOK);
+#endif
+
+ /*
+ * Function to move a caller tuple. This is usually implemented as a
+ * memmove() shim, but function may also perform additional fix-up of
+ * caller tuple where needed. Batch memory support requires the movement
+ * of caller tuples from one location in memory to another.
+ */
+ void (*movetup) (void *dest, void *src, unsigned int len);
+
/*
* This array holds the tuples now in sort memory. If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
#define WRITETUP(state,tape,stup) ((*(state)->writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
- #define LACKMEM(state) ((state)->availMem < 0)
+#ifdef PGXC
+#define GETLEN(state,tape,eofOK) ((*(state)->getlen) (state, tape, eofOK))
+#endif
+ #define MOVETUP(dest,src,len) ((*(state)->movetup) (dest, src, len))
+ #define LACKMEM(state) ((state)->availMem < 0 && !(state)->batchUsed)
#define USEMEM(state,amt) ((state)->availMem -= (amt))
#define FREEMEM(state,amt) ((state)->availMem += (amt))
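+
+/*
+ * Editorial sketch, not part of the patch: the intended read-side use of
+ * the GETLEN/READTUP hooks above, following the usual tape-read pattern
+ * in this file (srcTape and stup are assumed locals):
+ *
+ *     unsigned int tuplen;
+ *
+ *     if ((tuplen = GETLEN(state, srcTape, true)) != 0)
+ *     {
+ *         READTUP(state, &stup, srcTape, tuplen);
+ *         ... stup now holds the next tuple from srcTape ...
+ *     }
+ *     else
+ *         ... end of run on srcTape ...
+ */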
SortTuple *stup);
static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
int tapenum, unsigned int len);
+#ifdef PGXC
+static unsigned int getlen_datanode(Tuplesortstate *state, int tapenum,
+ bool eofOK);
+static void readtup_datanode(Tuplesortstate *state, SortTuple *stup,
+ int tapenum, unsigned int len);
+#endif
+ static void movetup_heap(void *dest, void *src, unsigned int len);
static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
Tuplesortstate *state);
static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
state->copytup = copytup_heap;
state->writetup = writetup_heap;
state->readtup = readtup_heap;
+#ifdef PGXC
+ state->getlen = getlen;
+#endif
+ state->movetup = movetup_heap;
state->tupDesc = tupDesc; /* assume we need not copy tupDesc */
state->abbrevNext = 10;
state->copytup = copytup_index;
state->writetup = writetup_index;
state->readtup = readtup_index;
+#ifdef PGXC
+ state->getlen = getlen;
+#endif
+ state->movetup = movetup_index;
state->abbrevNext = 10;
state->heapRel = heapRel;
state->copytup = copytup_index;
state->writetup = writetup_index;
state->readtup = readtup_index;
+#ifdef PGXC
+ state->getlen = getlen;
+#endif
+ state->movetup = movetup_index;
state->heapRel = heapRel;
state->indexRel = indexRel;
state->copytup = copytup_datum;
state->writetup = writetup_datum;
state->readtup = readtup_datum;
+#ifdef PGXC
+ state->getlen = getlen;
+#endif
+ state->movetup = movetup_datum;
state->abbrevNext = 10;
state->datumType = datumType;
LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
}
-
+ /*
+ * Get memory for tuple from within READTUP() routine. Allocate
+ * memory and account for that, or consume from tape's batch
+ * allocation.
+ *
+ * Memory returned here in the final on-the-fly merge case is recycled
+ * from tape's batch allocation. Otherwise, callers must pfree() or
+ * reset tuple child memory context, and account for that with a
+ * FREEMEM(). Currently, this only ever needs to happen in WRITETUP()
+ * routines.
+ */
+ static void *
+ readtup_alloc(Tuplesortstate *state, int tapenum, Size tuplen)
+ {
+ if (state->batchUsed)
+ {
+ /*
+ * No USEMEM() call, because during final on-the-fly merge accounting
+ * is based on tape-private state. ("Overflow" allocations are
+ * detected as an indication that a new round or preloading is
+		 * detected as an indication that a new round of preloading is
+ * for reuse.)
+ */
+ return mergebatchalloc(state, tapenum, tuplen);
+ }
+ else
+ {
+ char *ret;
+
+ /* Batch allocation yet to be performed */
+ ret = MemoryContextAlloc(state->tuplecontext, tuplen);
+ USEMEM(state, GetMemoryChunkSpace(ret));
+ return ret;
+ }
+ }
+
/*
* Routines specialized for HeapTuple (actually MinimalTuple) case
*/
&stup->isnull1);
}
+#ifdef PGXC
+static unsigned int
+getlen_datanode(Tuplesortstate *state, int tapenum, bool eofOK)
+{
+ ResponseCombiner *combiner = state->combiner;
+ TupleTableSlot *dstslot = combiner->ss.ps.ps_ResultTupleSlot;
+ TupleTableSlot *slot;
+
+ combiner->current_conn = tapenum;
+ slot = FetchTuple(combiner);
+ if (TupIsNull(slot))
+ {
+ if (eofOK)
+ return 0;
+ else
+ elog(ERROR, "unexpected end of data");
+ }
+
+ if (slot != dstslot)
+ ExecCopySlot(dstslot, slot);
+
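+	/*
+	 * Return a nonzero dummy length: the fetched tuple is already in the
+	 * combiner's result slot, and readtup_datanode() reads from that slot
+	 * rather than from a tape, so the length value is never used.
+	 */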
+ return 1;
+}
+
+static void
+readtup_datanode(Tuplesortstate *state, SortTuple *stup,
+ int tapenum, unsigned int len)
+{
+ TupleTableSlot *slot = state->combiner->ss.ps.ps_ResultTupleSlot;
+ MinimalTuple tuple;
+ HeapTupleData htup;
+
+ Assert(!TupIsNull(slot));
+
+ /* copy the tuple into sort storage */
+ tuple = ExecCopySlotMinimalTuple(slot);
+ stup->tuple = (void *) tuple;
+ USEMEM(state, GetMemoryChunkSpace(tuple));
+ /* set up first-column key value */
+ htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
+ htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
+ stup->datum1 = heap_getattr(&htup,
+ state->sortKeys[0].ssup_attno,
+ state->tupDesc,
+ &stup->isnull1);
+}
+#endif /* PGXC */
+
+ static void
+ movetup_heap(void *dest, void *src, unsigned int len)
+ {
+ memmove(dest, src, len);
+ }
+
/*
* Routines specialized for the CLUSTER case (HeapTuple data, with
* comparisons per a btree index definition)
* before switching to the other state or activating a different read pointer.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* destroyed at the end of each transaction.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* for too long.)
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
-
+#ifdef PGXC
+#include "pgxc/pgxc.h"
+#endif
+ /*
+ * GUC parameters
+ */
+ int old_snapshot_threshold; /* number of minutes, -1 disables */
+
+ /*
+ * Structure for dealing with old_snapshot_threshold implementation.
+ */
+ typedef struct OldSnapshotControlData
+ {
+ /*
+ * Variables for old snapshot handling are shared among processes and are
+ * only allowed to move forward.
+ */
+ slock_t mutex_current; /* protect current_timestamp */
+ int64 current_timestamp; /* latest snapshot timestamp */
+ slock_t mutex_latest_xmin; /* protect latest_xmin and
+ * next_map_update */
+ TransactionId latest_xmin; /* latest snapshot xmin */
+ int64 next_map_update; /* latest snapshot valid up to */
+ slock_t mutex_threshold; /* protect threshold fields */
+ int64 threshold_timestamp; /* earlier snapshot is old */
+ TransactionId threshold_xid; /* earlier xid may be gone */
+
+ /*
+ * Keep one xid per minute for old snapshot error handling.
+ *
+ * Use a circular buffer with a head offset, a count of entries currently
+ * used, and a timestamp corresponding to the xid at the head offset. A
+ * count_used value of zero means that there are no times stored; a
+ * count_used value of OLD_SNAPSHOT_TIME_MAP_ENTRIES means that the buffer
+ * is full and the head must be advanced to add new entries. Use
+ * timestamps aligned to minute boundaries, since that seems less
+ * surprising than aligning based on the first usage timestamp. The
+ * latest bucket is effectively stored within latest_xmin. The circular
+ * buffer is updated when we get a new xmin value that doesn't fall into
+ * the same interval.
+ *
+ * It is OK if the xid for a given time slot is from earlier than
+ * calculated by adding the number of minutes corresponding to the
+ * (possibly wrapped) distance from the head offset to the time of the
+ * head entry, since that just results in the vacuuming of old tuples
+ * being slightly less aggressive. It would not be OK for it to be off in
+ * the other direction, since it might result in vacuuming tuples that are
+ * still expected to be there.
+ *
+ * Use of an SLRU was considered but not chosen because it is more
+ * heavyweight than is needed for this, and would probably not be any less
+ * code to implement.
+ *
+ * Persistence is not needed.
+ */
+ int head_offset; /* subscript of oldest tracked time */
+ int64 head_timestamp; /* time corresponding to head xid */
+ int count_used; /* how many slots are in use */
+ TransactionId xid_by_minute[FLEXIBLE_ARRAY_MEMBER];
+ } OldSnapshotControlData;
+
+ static volatile OldSnapshotControlData *oldSnapshotControl;
+
+
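+ /*
+  * Editorial sketch, not part of the patch: the slot arithmetic implied by
+  * the circular-buffer comments above. Timestamps are minute-aligned, so
+  * the slot for a given time is the head offset plus the whole-minute
+  * distance, wrapped around the map size (names simplified):
+  *
+  *     int
+  *     old_snapshot_slot(int head_offset, int64 head_ts, int64 ts)
+  *     {
+  *         int minutes = (int) ((ts - head_ts) / USECS_PER_MINUTE);
+  *
+  *         return (head_offset + minutes) % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+  *     }
+  */
+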
/*
* CurrentSnapshot points to the only snapshot taken in transaction-snapshot
* mode, and to the latest one taken in a read-committed transaction.
*
* This code is released under the terms of the PostgreSQL License.
*
- * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/bin/initdb/initdb.c
* (no quoting to worry about).
*/
static const char *boot_options = "-F";
-static const char *backend_options = "--single -F -O -j -c search_path=pg_catalog -c exit_on_error=true";
+static const char *backend_options = "--single "
+#ifdef PGXC
+ "--localxid "
+#endif
- "-F -O -c search_path=pg_catalog -c exit_on_error=true";
++ "-F -O -j -c search_path=pg_catalog -c exit_on_error=true";
- static const char *subdirs[] = {
+ static const char *const subdirs[] = {
"global",
- "pg_xlog",
"pg_xlog/archive_status",
"pg_clog",
"pg_commit_ts",
static void test_config_settings(void);
static void setup_config(void);
static void bootstrap_template1(void);
- static void setup_auth(void);
- static void get_set_pwd(void);
- static void setup_depend(void);
- static void setup_sysviews(void);
+ static void setup_auth(FILE *cmdfd);
+ static void get_set_pwd(FILE *cmdfd);
+ static void setup_depend(FILE *cmdfd);
+ static void setup_sysviews(FILE *cmdfd);
+#ifdef PGXC
+static void setup_nodeself(void);
+#endif
- static void setup_description(void);
- static void setup_collation(void);
- static void setup_conversion(void);
- static void setup_dictionary(void);
- static void setup_privileges(void);
+ static void setup_description(FILE *cmdfd);
+ static void setup_collation(FILE *cmdfd);
+ static void setup_conversion(FILE *cmdfd);
+ static void setup_dictionary(FILE *cmdfd);
+ static void setup_privileges(FILE *cmdfd);
static void set_info_version(void);
- static void setup_schema(void);
- static void load_plpgsql(void);
- static void vacuum_db(void);
- static void make_template0(void);
- static void make_postgres(void);
+ static void setup_schema(FILE *cmdfd);
+ static void load_plpgsql(FILE *cmdfd);
+ static void vacuum_db(FILE *cmdfd);
+ static void make_template0(FILE *cmdfd);
+ static void make_postgres(FILE *cmdfd);
static void fsync_pgdata(void);
static void trapsig(int signum);
static void check_ok(void);
free(*line);
}
- PG_CMD_CLOSE;
-
free(sysviews_setup);
-
- check_ok();
}
+#ifdef PGXC
+/*
+ * Set up the Postgres-XC cluster node catalog data for the node itself,
+ * i.e. the node currently being initialized.
+ */
+static void
+setup_nodeself(void)
+{
+ PG_CMD_DECL;
+
+ fputs(_("creating cluster information ... "), stdout);
+ fflush(stdout);
+
+ snprintf(cmd, sizeof(cmd),
+ "\"%s\" %s template1 >%s",
+ backend_exec, backend_options,
+ DEVNULL);
+
+ PG_CMD_OPEN;
+
+ PG_CMD_PRINTF1("CREATE NODE %s WITH (type = 'coordinator');\n",
+ nodename);
+
+ PG_CMD_CLOSE;
+
+ check_ok();
+}
+#endif
+
/*
* load description data
*/
PG_CMD_PUTS("CREATE TEMP TABLE tmp_pg_shdescription ( "
" objoid oid, "
" classname name, "
- " description text) WITHOUT OIDS;\n");
+ " description text) WITHOUT OIDS;\n\n");
- PG_CMD_PRINTF1("COPY tmp_pg_shdescription FROM E'%s';\n",
+ PG_CMD_PRINTF1("COPY tmp_pg_shdescription FROM E'%s';\n\n",
escape_quotes(shdesc_file));
+#ifdef XCP
+ PG_CMD_PUTS("INSERT INTO pg_catalog.pg_shdescription "
+#else
PG_CMD_PUTS("INSERT INTO pg_shdescription "
+#endif
" SELECT t.objoid, c.oid, t.description "
" FROM tmp_pg_shdescription t, pg_class c "
- " WHERE c.relname = t.classname;\n");
+ " WHERE c.relname = t.classname;\n\n");
/* Create default descriptions for operator implementation functions */
PG_CMD_PUTS("WITH funcdescs AS ( "
char **priv_lines;
static char *privileges_setup[] = {
"UPDATE pg_class "
- " SET relacl = E'{\"=r/\\\\\"$POSTGRES_SUPERUSERNAME\\\\\"\"}' "
- " WHERE relkind IN ('r', 'v', 'm', 'S') AND relacl IS NULL;\n",
- "GRANT USAGE ON SCHEMA pg_catalog TO PUBLIC;\n",
- "GRANT CREATE, USAGE ON SCHEMA public TO PUBLIC;\n",
+ " SET relacl = (SELECT array_agg(a.acl) FROM "
+ " (SELECT E'=r/\"$POSTGRES_SUPERUSERNAME\"' as acl "
+ " UNION SELECT unnest(pg_catalog.acldefault("
+ " CASE WHEN relkind = 'S' THEN 's' ELSE 'r' END::\"char\",10::oid))"
+ " ) as a) "
+ " WHERE relkind IN ('r', 'v', 'm', 'S') AND relacl IS NULL;\n\n",
+ "GRANT USAGE ON SCHEMA pg_catalog TO PUBLIC;\n\n",
+ "GRANT CREATE, USAGE ON SCHEMA public TO PUBLIC;\n\n",
+#ifdef XCP
+	"GRANT USAGE ON SCHEMA storm_catalog TO PUBLIC;\n\n",
+#endif
- "REVOKE ALL ON pg_largeobject FROM PUBLIC;\n",
+ "REVOKE ALL ON pg_largeobject FROM PUBLIC;\n\n",
+ "INSERT INTO pg_init_privs "
+ " (objoid, classoid, objsubid, initprivs, privtype)"
+ " SELECT"
+ " oid,"
+ " (SELECT oid FROM pg_class WHERE relname = 'pg_class'),"
+ " 0,"
+ " relacl,"
+ " 'i'"
+ " FROM"
+ " pg_class"
+ " WHERE"
+ " relacl IS NOT NULL"
+ " AND relkind IN ('r', 'v', 'm', 'S');",
+ "INSERT INTO pg_init_privs "
+ " (objoid, classoid, objsubid, initprivs, privtype)"
+ " SELECT"
+ " pg_class.oid,"
+ " (SELECT oid FROM pg_class WHERE relname = 'pg_class'),"
+ " pg_attribute.attnum,"
+ " pg_attribute.attacl,"
+ " 'i'"
+ " FROM"
+ " pg_class"
+ " JOIN pg_attribute ON (pg_class.oid = pg_attribute.attrelid)"
+ " WHERE"
+ " pg_attribute.attacl IS NOT NULL"
+ " AND pg_class.relkind IN ('r', 'v', 'm', 'S');",
+ "INSERT INTO pg_init_privs "
+ " (objoid, classoid, objsubid, initprivs, privtype)"
+ " SELECT"
+ " oid,"
+ " (SELECT oid FROM pg_class WHERE relname = 'pg_proc'),"
+ " 0,"
+ " proacl,"
+ " 'i'"
+ " FROM"
+ " pg_proc"
+ " WHERE"
+ " proacl IS NOT NULL;",
+ "INSERT INTO pg_init_privs "
+ " (objoid, classoid, objsubid, initprivs, privtype)"
+ " SELECT"
+ " oid,"
+ " (SELECT oid FROM pg_class WHERE relname = 'pg_type'),"
+ " 0,"
+ " typacl,"
+ " 'i'"
+ " FROM"
+ " pg_type"
+ " WHERE"
+ " typacl IS NOT NULL;",
+ "INSERT INTO pg_init_privs "
+ " (objoid, classoid, objsubid, initprivs, privtype)"
+ " SELECT"
+ " oid,"
+ " (SELECT oid FROM pg_class WHERE relname = 'pg_language'),"
+ " 0,"
+ " lanacl,"
+ " 'i'"
+ " FROM"
+ " pg_language"
+ " WHERE"
+ " lanacl IS NOT NULL;",
+ "INSERT INTO pg_init_privs "
+ " (objoid, classoid, objsubid, initprivs, privtype)"
+ " SELECT"
+ " oid,"
+ " (SELECT oid FROM pg_class WHERE "
+ " relname = 'pg_largeobject_metadata'),"
+ " 0,"
+ " lomacl,"
+ " 'i'"
+ " FROM"
+ " pg_largeobject_metadata"
+ " WHERE"
+ " lomacl IS NOT NULL;",
+ "INSERT INTO pg_init_privs "
+ " (objoid, classoid, objsubid, initprivs, privtype)"
+ " SELECT"
+ " oid,"
+ " (SELECT oid FROM pg_class WHERE relname = 'pg_namespace'),"
+ " 0,"
+ " nspacl,"
+ " 'i'"
+ " FROM"
+ " pg_namespace"
+ " WHERE"
+ " nspacl IS NOT NULL;",
+ "INSERT INTO pg_init_privs "
+ " (objoid, classoid, objsubid, initprivs, privtype)"
+ " SELECT"
+ " oid,"
+ " (SELECT oid FROM pg_class WHERE "
+ " relname = 'pg_foreign_data_wrapper'),"
+ " 0,"
+ " fdwacl,"
+ " 'i'"
+ " FROM"
+ " pg_foreign_data_wrapper"
+ " WHERE"
+ " fdwacl IS NOT NULL;",
+ "INSERT INTO pg_init_privs "
+ " (objoid, classoid, objsubid, initprivs, privtype)"
+ " SELECT"
+ " oid,"
+ " (SELECT oid FROM pg_class "
+ " WHERE relname = 'pg_foreign_server'),"
+ " 0,"
+ " srvacl,"
+ " 'i'"
+ " FROM"
+ " pg_foreign_server"
+ " WHERE"
+ " srvacl IS NOT NULL;",
NULL
};
* load PL/pgsql server-side language
*/
static void
- load_plpgsql(void)
+ load_plpgsql(FILE *cmdfd)
{
- PG_CMD_DECL;
-
- fputs(_("loading PL/pgSQL server-side language ... "), stdout);
- fflush(stdout);
-
- snprintf(cmd, sizeof(cmd),
- "\"%s\" %s template1 >%s",
- backend_exec, backend_options,
- DEVNULL);
-
- PG_CMD_OPEN;
-
- PG_CMD_PUTS("CREATE EXTENSION plpgsql;\n");
-
- PG_CMD_CLOSE;
-
- check_ok();
+ PG_CMD_PUTS("CREATE EXTENSION plpgsql;\n\n");
}
+#ifdef PGXC
+/*
+ * Vacuum-freeze the given database. This is required to prevent xid
+ * wraparound issues when a node is brought up with xids out of sync with
+ * the GTM's xids.
+ */
+static void
+vacuumfreeze(char *dbname)
+{
+ PG_CMD_DECL;
+	char		msg[MAXPGPATH];
+
+	snprintf(msg, sizeof(msg), _("freezing database %s ... "), dbname);
+	fputs(msg, stdout);
+ fflush(stdout);
+
+ snprintf(cmd, sizeof(cmd),
+ "\"%s\" %s %s >%s",
+ backend_exec, backend_options, dbname,
+ DEVNULL);
+
+ PG_CMD_OPEN;
+
+ PG_CMD_PUTS("VACUUM FREEZE;\n");
+
+ PG_CMD_CLOSE;
+
+ check_ok();
+}
+#endif /* PGXC */
+
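+/*
+ * Editorial note, not part of the patch: the expected call sites (assumed,
+ * not shown in this hunk) freeze each bootstrap database once, e.g.
+ *
+ *     vacuumfreeze("template1");
+ *     vacuumfreeze("postgres");
+ */
+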
/*
* clean everything up in template1
*/
/*
* We use the OID of template0 to determine lastsysoid
*/
+#ifdef XCP
+ "UPDATE pg_catalog.pg_database SET datlastsysoid = "
+ " (SELECT oid FROM pg_catalog.pg_database "
+#else
"UPDATE pg_database SET datlastsysoid = "
" (SELECT oid FROM pg_database "
- " WHERE datname = 'template0');\n",
+#endif
+ " WHERE datname = 'template0');\n\n",
/*
* Explicitly revoke public create-schema and create-temp-table
/*
* Finally vacuum to clean up dead rows in pg_database
*/
- "VACUUM FULL pg_catalog.pg_database;\n",
+#ifdef XCP
- "VACUUM FULL pg_database;\n",
++	"VACUUM pg_catalog.pg_database;\n\n",
+#else
+ "VACUUM pg_database;\n\n",
+#endif
NULL
};
*/
write_version_file("base/1");
- /* Create the stuff we don't need to use bootstrap mode for */
+ /*
+ * Create the stuff we don't need to use bootstrap mode for, using a
+ * backend running in simple standalone mode.
+ */
+ fputs(_("performing post-bootstrap initialization ... "), stdout);
+ fflush(stdout);
+
+ snprintf(cmd, sizeof(cmd),
+ "\"%s\" %s template1 >%s",
+ backend_exec, backend_options,
+ DEVNULL);
+
+ PG_CMD_OPEN;
- setup_auth();
+ setup_auth(cmdfd);
if (pwprompt || pwfilename)
- get_set_pwd();
+ get_set_pwd(cmdfd);
- setup_depend();
+ setup_depend(cmdfd);
- setup_sysviews();
+ setup_sysviews(cmdfd);
+#ifdef PGXC
+ /* Initialize catalog information about the node self */
+ setup_nodeself();
+#endif
+ setup_description(cmdfd);
- setup_description();
-
- setup_collation();
+ setup_collation(cmdfd);
- setup_conversion();
+ setup_conversion(cmdfd);
- setup_dictionary();
+ setup_dictionary(cmdfd);
- setup_privileges();
+ setup_privileges(cmdfd);
- setup_schema();
+ setup_schema(cmdfd);
- load_plpgsql();
+ load_plpgsql(cmdfd);
- vacuum_db();
+#ifdef XCP
+#ifdef NOT_USED
+ setup_storm();
+#endif
+#endif
+
+ vacuum_db(cmdfd);
- make_template0();
+ make_template0(cmdfd);
- make_postgres();
+ make_postgres(cmdfd);
+
+ PG_CMD_CLOSE;
+
+ check_ok();
}
/*
* Since there might be quotes to handle here, it is easier simply to pass
- * everything to a shell to process them.
- *
- * XXX it would be better to fork and exec so that we would know the child
- * postmaster's PID directly; then test_postmaster_connection could use
- * the PID without having to rely on reading it back from the pidfile.
+ * everything to a shell to process them. Use exec so that the postmaster
+ * has the same PID as the current child process.
*/
if (log_file != NULL)
- snprintf(cmd, MAXPGPATH, "\"%s\" %s %s%s < \"%s\" >> \"%s\" 2>&1 &",
+#ifdef PGXC
- snprintf(cmd, MAXPGPATH, "\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1 &",
++		snprintf(cmd, MAXPGPATH, "exec \"%s\" %s %s%s < \"%s\" >> \"%s\" 2>&1",
+ exec_path, pgxcCommand, pgdata_opt, post_opts,
+ DEVNULL, log_file);
+#else
+ snprintf(cmd, MAXPGPATH, "exec \"%s\" %s%s < \"%s\" >> \"%s\" 2>&1",
exec_path, pgdata_opt, post_opts,
DEVNULL, log_file);
+#endif
else
- snprintf(cmd, MAXPGPATH, "\"%s\" %s %s%s < \"%s\" 2>&1 &",
+#ifdef PGXC
- snprintf(cmd, MAXPGPATH, "\"%s\" %s%s < \"%s\" 2>&1 &",
++		snprintf(cmd, MAXPGPATH, "exec \"%s\" %s %s%s < \"%s\" 2>&1",
+ exec_path, pgxcCommand, pgdata_opt, post_opts, DEVNULL);
+#else
+ snprintf(cmd, MAXPGPATH, "exec \"%s\" %s%s < \"%s\" 2>&1",
exec_path, pgdata_opt, post_opts, DEVNULL);
+#endif
- return system(cmd);
+ (void) execl("/bin/sh", "/bin/sh", "-c", cmd, (char *) NULL);
+
+ /* exec failed */
+ write_stderr(_("%s: could not start server: %s\n"),
+ progname, strerror(errno));
+ exit(1);
+
+ return 0; /* keep dumb compilers quiet */
+
#else /* WIN32 */
/*
* pg_dump is a utility for dumping out a postgres database
* into a script file.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* pg_dump will read the system catalogs in a database and dump out a
static const CatalogId nilCatalogId = {0, 0};
+#ifdef PGXC
+static int include_nodes = 0;
+#endif
+
static void help(const char *progname);
- static void setup_connection(Archive *AH, DumpOptions *dopt,
+ static void setup_connection(Archive *AH,
const char *dumpencoding, const char *dumpsnapshot,
char *use_role);
static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
* namespaces. If specific namespaces are being dumped, dump just those
* namespaces. Otherwise, dump all non-system namespaces.
*/
+
if (table_include_oids.head != NULL)
- nsinfo->dobj.dump = false;
+ nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
else if (schema_include_oids.head != NULL)
- nsinfo->dobj.dump = simple_oid_list_member(&schema_include_oids,
- nsinfo->dobj.catId.oid);
+ nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
+ simple_oid_list_member(&schema_include_oids,
+ nsinfo->dobj.catId.oid) ?
+ DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
+ else if (fout->remoteVersion >= 90600 &&
+			 strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
+
+ /*
+ * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
+ * they are interesting (and not the original ACLs which were set at
+ * initdb time, see pg_init_privs).
+ */
+ nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
+#ifdef XCP
+ strncmp(nsinfo->dobj.name, "storm_", 6) == 0 ||
+#endif
strcmp(nsinfo->dobj.name, "information_schema") == 0)
- nsinfo->dobj.dump = false;
+ nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
else
- nsinfo->dobj.dump = true;
+ nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
/*
* In any case, a namespace can be excluded by an exclusion switch
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
- "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
+#ifdef PGXC
+ "(SELECT pclocatortype from pgxc_class v where v.pcrelid = c.oid) AS pgxclocatortype,"
+ "(SELECT pcattnum from pgxc_class v where v.pcrelid = c.oid) AS pgxcattnum,"
+ "(SELECT string_agg(node_name,',') AS pgxc_node_names from pgxc_node n where n.oid in (select unnest(nodeoids) from pgxc_class v where v.pcrelid=c.oid) ) , "
+#endif
+ "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
"CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
"WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
- "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
+ "tc.reloptions AS toast_reloptions, "
+ "NULL AS changed_acl "
"FROM pg_class c "
"LEFT JOIN pg_depend d ON "
"(c.relkind = '%c' AND "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
- "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
+#ifdef PGXC
+ "(SELECT pclocatortype from pgxc_class v where v.pcrelid = c.oid) AS pgxclocatortype,"
+ "(SELECT pcattnum from pgxc_class v where v.pcrelid = c.oid) AS pgxcattnum,"
+ "(SELECT string_agg(node_name,',') AS pgxc_node_names from pgxc_node n where n.oid in (select unnest(nodeoids) from pgxc_class v where v.pcrelid=c.oid) ) , "
+#endif
+ "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
"CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
"WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
- "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
+ "tc.reloptions AS toast_reloptions, "
+ "NULL AS changed_acl "
"FROM pg_class c "
"LEFT JOIN pg_depend d ON "
"(c.relkind = '%c' AND "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
- "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
+#ifdef PGXC
+ "(SELECT pclocatortype from pgxc_class v where v.pcrelid = c.oid) AS pgxclocatortype,"
+ "(SELECT pcattnum from pgxc_class v where v.pcrelid = c.oid) AS pgxcattnum,"
+ "(SELECT string_agg(node_name,',') AS pgxc_node_names from pgxc_node n where n.oid in (select unnest(nodeoids) from pgxc_class v where v.pcrelid=c.oid) ) , "
+#endif
+ "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
"CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
"WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
- "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
+ "tc.reloptions AS toast_reloptions, "
+ "NULL AS changed_acl "
"FROM pg_class c "
"LEFT JOIN pg_depend d ON "
"(c.relkind = '%c' AND "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
- "array_to_string(c.reloptions, ', ') AS reloptions, "
- "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
+#ifdef PGXC
+ "(SELECT pclocatortype from pgxc_class v where v.pcrelid = c.oid) AS pgxclocatortype,"
+ "(SELECT pcattnum from pgxc_class v where v.pcrelid = c.oid) AS pgxcattnum,"
+ "(SELECT string_agg(node_name,',') AS pgxc_node_names from pgxc_node n where n.oid in (select unnest(nodeoids) from pgxc_class v where v.pcrelid=c.oid) ) , "
+#endif
+ "c.reloptions AS reloptions, "
+ "tc.reloptions AS toast_reloptions, "
+ "NULL AS changed_acl "
"FROM pg_class c "
"LEFT JOIN pg_depend d ON "
"(c.relkind = '%c' AND "
if (strftime(buf, sizeof(buf), PGDUMP_STRFTIME_FMT, localtime(&now)) != 0)
fprintf(OPF, "-- %s %s\n\n", msg, buf);
}
-
- /*
- * Append the given string to the buffer, with suitable quoting for passing
- * the string as a value, in a keyword/pair value in a libpq connection
- * string
- */
- static void
- doConnStrQuoting(PQExpBuffer buf, const char *str)
- {
- const char *s;
- bool needquotes;
-
- /*
- * If the string consists entirely of plain ASCII characters, no need to
- * quote it. This is quite conservative, but better safe than sorry.
- */
- needquotes = false;
- for (s = str; *s; s++)
- {
- if (!((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') ||
- (*s >= '0' && *s <= '9') || *s == '_' || *s == '.'))
- {
- needquotes = true;
- break;
- }
- }
-
- if (needquotes)
- {
- appendPQExpBufferChar(buf, '\'');
- while (*str)
- {
- /* ' and \ must be escaped by to \' and \\ */
- if (*str == '\'' || *str == '\\')
- appendPQExpBufferChar(buf, '\\');
-
- appendPQExpBufferChar(buf, *str);
- str++;
- }
- appendPQExpBufferChar(buf, '\'');
- }
- else
- appendPQExpBufferStr(buf, str);
- }
-
- /*
- * Append the given string to the shell command being built in the buffer,
- * with suitable shell-style quoting.
- */
- static void
- doShellQuoting(PQExpBuffer buf, const char *str)
- {
- const char *p;
-
- #ifndef WIN32
- appendPQExpBufferChar(buf, '\'');
- for (p = str; *p; p++)
- {
- if (*p == '\'')
- appendPQExpBufferStr(buf, "'\"'\"'");
- else
- appendPQExpBufferChar(buf, *p);
- }
- appendPQExpBufferChar(buf, '\'');
- #else /* WIN32 */
-
- appendPQExpBufferChar(buf, '"');
- for (p = str; *p; p++)
- {
- if (*p == '"')
- appendPQExpBufferStr(buf, "\\\"");
- else
- appendPQExpBufferChar(buf, *p);
- }
- appendPQExpBufferChar(buf, '"');
- #endif /* WIN32 */
- }
-
+
+#ifdef PGXC
+static void
+dumpNodes(PGconn *conn)
+{
+ PQExpBuffer query;
+ PGresult *res;
+ int num;
+ int i;
+
+ query = createPQExpBuffer();
+
+ appendPQExpBuffer(query, "select 'CREATE NODE ' || node_name || '"
+ " WITH (TYPE = ' || chr(39) || (case when node_type='C'"
+ " then 'coordinator' else 'datanode' end) || chr(39)"
+ " || ' , HOST = ' || chr(39) || node_host || chr(39)"
+ " || ', PORT = ' || node_port || (case when nodeis_primary='t'"
+ " then ', PRIMARY' else ' ' end) || (case when nodeis_preferred"
+ " then ', PREFERRED' else ' ' end) || ');' "
+ " as node_query from pg_catalog.pgxc_node order by oid");
+
+ res = executeQuery(conn, query->data);
+
+ num = PQntuples(res);
+
+ if (num > 0)
+ fprintf(OPF, "--\n-- Nodes\n--\n\n");
+
+ for (i = 0; i < num; i++)
+ {
+ fprintf(OPF, "%s\n", PQgetvalue(res, i, PQfnumber(res, "node_query")));
+ }
+ fprintf(OPF, "\n");
+
+ PQclear(res);
+ destroyPQExpBuffer(query);
+}
+
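+/*
+ * Editorial illustration, not part of the patch: the query in dumpNodes()
+ * emits one ready-to-run statement per node, shaped like (values made up):
+ *
+ *   CREATE NODE dn1 WITH (TYPE = 'datanode' , HOST = 'localhost',
+ *       PORT = 15432, PRIMARY, PREFERRED);
+ */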
+static void
+dumpNodeGroups(PGconn *conn)
+{
+ PQExpBuffer query;
+ PGresult *res;
+ int num;
+ int i;
+
+ query = createPQExpBuffer();
+
+ appendPQExpBuffer(query,
+ "select 'CREATE NODE GROUP ' || pgxc_group.group_name"
+ " || ' WITH(' || string_agg(node_name,',') || ');'"
+ " as group_query from pg_catalog.pgxc_node, pg_catalog.pgxc_group"
+ " where pgxc_node.oid = any (pgxc_group.group_members)"
+ " group by pgxc_group.group_name"
+ " order by pgxc_group.group_name");
+
+ res = executeQuery(conn, query->data);
+
+ num = PQntuples(res);
+
+ if (num > 0)
+ fprintf(OPF, "--\n-- Node groups\n--\n\n");
+
+ for (i = 0; i < num; i++)
+ {
+ fprintf(OPF, "%s\n", PQgetvalue(res, i, PQfnumber(res, "group_query")));
+ }
+ fprintf(OPF, "\n");
+
+ PQclear(res);
+ destroyPQExpBuffer(query);
+}
+#endif
#include "commands/dbcommands_xlog.h"
#include "commands/sequence.h"
#include "commands/tablespace.h"
+ #include "replication/message.h"
+ #include "replication/origin.h"
#include "rmgrdesc.h"
- #include "storage/standby.h"
+ #include "storage/standbydefs.h"
#include "utils/relmapper.h"
+#ifdef XCP
+#include "pgxc/barrier.h"
+#endif
+
#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup) \
{ name, desc, identify},
int type; /* command type (SQL_COMMAND or META_COMMAND) */
int argc; /* number of command words */
char *argv[MAX_ARGS]; /* command word list */
- int cols[MAX_ARGS]; /* corresponding column starting from 1 */
- PgBenchExpr *expr; /* parsed expression */
+ PgBenchExpr *expr; /* parsed expression, if needed */
+ SimpleStats stats; /* time spent in this command */
} Command;
- typedef struct
+ typedef struct ParsedScript
{
+ const char *desc; /* script descriptor (eg, file name) */
+ int weight; /* selection weight */
+ Command **commands; /* NULL-terminated array of Commands */
+ StatsData stats; /* total time spent in script */
+ } ParsedScript;
+
+ static ParsedScript sql_script[MAX_SCRIPTS]; /* SQL script files */
+ static int num_scripts; /* number of scripts in sql_script[] */
+ static int num_commands = 0; /* total number of Command structs */
+ static int64 total_weight = 0;
- long start_time; /* when does the interval start */
- int cnt; /* number of transactions */
- int skipped; /* number of transactions skipped under --rate
- * and --latency-limit */
-
- double min_latency; /* min/max latencies */
- double max_latency;
- double sum_latency; /* sum(latency), sum(latency^2) - for
- * estimates */
- double sum2_latency;
+ static int debug = 0; /* debug flag */
- double min_lag;
- double max_lag;
- double sum_lag; /* sum(lag) */
- double sum2_lag; /* sum(lag*lag) */
- } AggVals;
+ /* Builtin test scripts */
+ typedef struct BuiltinScript
+ {
+ const char *name; /* very short name for -b ... */
+ const char *desc; /* short description */
+ const char *script; /* actual pgbench script */
+ } BuiltinScript;
- static Command **sql_files[MAX_FILES]; /* SQL script files */
- static int num_files; /* number of script files */
- static int num_commands = 0; /* total number of Command structs */
- static int debug = 0; /* debug flag */
+
- /* default scenario */
- static char *tpc_b = {
- "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
- "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
- "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
- "\\setrandom aid 1 :naccounts\n"
- "\\setrandom bid 1 :nbranches\n"
- "\\setrandom tid 1 :ntellers\n"
- "\\setrandom delta -5000 5000\n"
- "BEGIN;\n"
- "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n"
- "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
- "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;\n"
- "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n"
- "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
- "END;\n"
- };
+
+ static const BuiltinScript builtin_script[] =
+ {
+ {
+ "tpcb-like",
+ "<builtin: TPC-B (sort of)>",
+ "\\set aid random(1, " CppAsString2(naccounts) " * :scale)\n"
+ "\\set bid random(1, " CppAsString2(nbranches) " * :scale)\n"
+ "\\set tid random(1, " CppAsString2(ntellers) " * :scale)\n"
+ "\\set delta random(-5000, 5000)\n"
+ "BEGIN;\n"
+ "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n"
+ "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
+ "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;\n"
+ "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n"
+ "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
+ "END;\n"
+ },
+#ifdef PGXC
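++ /*
++ * Postgres-XL variant of tpcb-like (cf. the -k option): every statement
++ * also qualifies on the branch id (bid), so that with pgbench_accounts
++ * distributed by bid each query can be routed to a single datanode.
++ */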
- static char *tpc_b_bid = {
- "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
- "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
- "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
- "\\setrandom aid 1 :naccounts\n"
- "\\setrandom bid 1 :nbranches\n"
- "\\setrandom tid 1 :ntellers\n"
- "\\setrandom delta -5000 5000\n"
- "BEGIN;\n"
- "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n"
- "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid\n"
- "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid AND bid = :bid;\n"
- "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n"
- "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
- "END;\n"
- };
++ {
++ "tpcb-like-bid",
++ "<builtin: TPC-B (sort of), with bid>",
++ "\\set aid random(1, " CppAsString2(naccounts) " * :scale)\n"
++ "\\set bid random(1, " CppAsString2(nbranches) " * :scale)\n"
++ "\\set tid random(1, " CppAsString2(ntellers) " * :scale)\n"
++ "\\set delta random(-5000, 5000)\n"
++ "BEGIN;\n"
++ "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n"
++ "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid;\n"
++ "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid AND bid = :bid;\n"
++ "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n"
++ "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
++ "END;\n"
++ },
+#endif
-
-
- /* -N case */
- static char *simple_update = {
- "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
- "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
- "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
- "\\setrandom aid 1 :naccounts\n"
- "\\setrandom bid 1 :nbranches\n"
- "\\setrandom tid 1 :ntellers\n"
- "\\setrandom delta -5000 5000\n"
- "BEGIN;\n"
- "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n"
- "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
- "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
- "END;\n"
- };
-
+ {
+ "simple-update",
+ "<builtin: simple update>",
+ "\\set aid random(1, " CppAsString2(naccounts) " * :scale)\n"
+ "\\set bid random(1, " CppAsString2(nbranches) " * :scale)\n"
+ "\\set tid random(1, " CppAsString2(ntellers) " * :scale)\n"
+ "\\set delta random(-5000, 5000)\n"
+ "BEGIN;\n"
+ "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n"
+ "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
+ "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
+ "END;\n"
+ },
+#ifdef PGXC
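++ /* Postgres-XL bid variant of simple-update; see tpcb-like-bid above. */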
- static char *simple_update_bid = {
- "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
- "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
- "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
- "\\setrandom aid 1 :naccounts\n"
- "\\setrandom bid 1 :nbranches\n"
- "\\setrandom tid 1 :ntellers\n"
- "\\setrandom delta -5000 5000\n"
- "BEGIN;\n"
- "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n"
- "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid;\n"
- "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
- "END;\n"
- };
++ {
++ "simple-update-bid",
++ "<builtin: simple update bid>",
++ "\\set aid random(1, " CppAsString2(naccounts) " * :scale)\n"
++ "\\set bid random(1, " CppAsString2(nbranches) " * :scale)\n"
++ "\\set tid random(1, " CppAsString2(ntellers) " * :scale)\n"
++ "\\set delta random(-5000, 5000)\n"
++ "BEGIN;\n"
++ "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n"
++ "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid;\n"
++ "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
++ "END;\n"
++ },
+#endif
-
- /* -S case */
- static char *select_only = {
- "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
- "\\setrandom aid 1 :naccounts\n"
- "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
+ {
+ "select-only",
+ "<builtin: select only>",
+ "\\set aid random(1, " CppAsString2(naccounts) " * :scale)\n"
+ "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
+ }
};
+
/* Function prototypes */
- static void setalarm(int seconds);
+ static void setIntValue(PgBenchValue *pv, int64 ival);
+ static void setDoubleValue(PgBenchValue *pv, double dval);
+ static bool evaluateExpr(TState *, CState *, PgBenchExpr *, PgBenchValue *);
+ static void doLog(TState *thread, CState *st, instr_time *now,
+ StatsData *agg, bool skipped, double latency, double lag);
+ static void processXactStats(TState *thread, CState *st, instr_time *now,
+ bool skipped, StatsData *agg);
+ static void pgbench_error(const char *fmt,...) pg_attribute_printf(1, 2);
+ static void addScript(ParsedScript script);
static void *threadRun(void *arg);
+ static void setalarm(int seconds);
+
+
+ /* callback functions for our flex lexer */
+ static const PsqlScanCallbacks pgbench_callbacks = {
+ NULL, /* don't need get_variable functionality */
+ pgbench_error
+ };
- static void doLog(TState *thread, CState *st, FILE *logfile, instr_time *now,
- AggVals *agg, bool skipped);
static void
usage(void)
" -C, --connect establish new connection for each transaction\n"
" -D, --define=VARNAME=VALUE\n"
" define variable for use by custom script\n"
- " -f, --file=FILENAME read transaction script from FILENAME\n"
+#ifdef PGXC
+ " -k query with default key and additional key branch id (bid)\n"
+#endif
" -j, --jobs=NUM number of threads (default: 1)\n"
" -l, --log write transaction times to log file\n"
- " -L, --latency-limit=NUM count transactions lasting more than NUM ms\n"
- " as late.\n"
+ " -L, --latency-limit=NUM count transactions lasting more than NUM ms as late\n"
" -M, --protocol=simple|extended|prepared\n"
" protocol for submitting queries (default: simple)\n"
" -n, --no-vacuum do not run VACUUM before tests\n"
state = (CState *) pg_malloc(sizeof(CState));
memset(state, 0, sizeof(CState));
- while ((c = getopt_long(argc, argv, "ih:knvp:dqSNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
+#ifdef PGXC
- while ((c = getopt_long(argc, argv, "ih:nvp:dqSNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
++ while ((c = getopt_long(argc, argv, "ih:knvp:dqb:SNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
+#else
+ while ((c = getopt_long(argc, argv, "ih:nvp:dqb:SNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
+#endif
{
+ char *script;
+
switch (c)
{
case 'i':
#define THING_NO_SHOW (THING_NO_CREATE | THING_NO_DROP)
static const pgsql_thing_t words_after_create[] = {
+ {"ACCESS METHOD", NULL, NULL},
{"AGGREGATE", NULL, &Query_for_list_of_aggregates},
+#ifdef PGXC
+ {"BARRIER", NULL, NULL}, /* Comes barrier name next, so skip it */
+#endif
{"CAST", NULL, NULL}, /* Casts have complex structures for names, so
* skip it */
{"COLLATION", "SELECT pg_catalog.quote_ident(collname) FROM pg_catalog.pg_collation WHERE collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) AND substring(pg_catalog.quote_ident(collname),1,%d)='%s'"},
{"GROUP", Query_for_list_of_roles},
{"LANGUAGE", Query_for_list_of_languages},
{"INDEX", NULL, &Query_for_list_of_indexes},
- {"MATERIALIZED VIEW", NULL, NULL},
+#ifdef PGXC
+ {"NODE", Query_for_list_of_available_nodenames},
+ {"NODE GROUP", Query_for_list_of_available_nodegroup_names},
+#endif
+ {"MATERIALIZED VIEW", NULL, &Query_for_list_of_matviews},
{"OPERATOR", NULL, NULL}, /* Querying for this is probably not such a
* good idea. */
{"OWNED", NULL, NULL, THING_NO_CREATE}, /* for DROP OWNED BY ... */
#define prev4_wd (previous_words[3])
#define prev5_wd (previous_words[4])
#define prev6_wd (previous_words[5])
+ #define prev7_wd (previous_words[6])
+ #define prev8_wd (previous_words[7])
+ #define prev9_wd (previous_words[8])
+
+ /* Macros for matching the last N words before point, case-insensitively. */
+ #define TailMatches1(p1) \
+ (previous_words_count >= 1 && \
+ word_matches(p1, prev_wd))
+
+ #define TailMatches2(p2, p1) \
+ (previous_words_count >= 2 && \
+ word_matches(p1, prev_wd) && \
+ word_matches(p2, prev2_wd))
+
+ #define TailMatches3(p3, p2, p1) \
+ (previous_words_count >= 3 && \
+ word_matches(p1, prev_wd) && \
+ word_matches(p2, prev2_wd) && \
+ word_matches(p3, prev3_wd))
+
+ #define TailMatches4(p4, p3, p2, p1) \
+ (previous_words_count >= 4 && \
+ word_matches(p1, prev_wd) && \
+ word_matches(p2, prev2_wd) && \
+ word_matches(p3, prev3_wd) && \
+ word_matches(p4, prev4_wd))
+
+ #define TailMatches5(p5, p4, p3, p2, p1) \
+ (previous_words_count >= 5 && \
+ word_matches(p1, prev_wd) && \
+ word_matches(p2, prev2_wd) && \
+ word_matches(p3, prev3_wd) && \
+ word_matches(p4, prev4_wd) && \
+ word_matches(p5, prev5_wd))
+
+ #define TailMatches6(p6, p5, p4, p3, p2, p1) \
+ (previous_words_count >= 6 && \
+ word_matches(p1, prev_wd) && \
+ word_matches(p2, prev2_wd) && \
+ word_matches(p3, prev3_wd) && \
+ word_matches(p4, prev4_wd) && \
+ word_matches(p5, prev5_wd) && \
+ word_matches(p6, prev6_wd))
+
+ #define TailMatches7(p7, p6, p5, p4, p3, p2, p1) \
+ (previous_words_count >= 7 && \
+ word_matches(p1, prev_wd) && \
+ word_matches(p2, prev2_wd) && \
+ word_matches(p3, prev3_wd) && \
+ word_matches(p4, prev4_wd) && \
+ word_matches(p5, prev5_wd) && \
+ word_matches(p6, prev6_wd) && \
+ word_matches(p7, prev7_wd))
+
+ #define TailMatches8(p8, p7, p6, p5, p4, p3, p2, p1) \
+ (previous_words_count >= 8 && \
+ word_matches(p1, prev_wd) && \
+ word_matches(p2, prev2_wd) && \
+ word_matches(p3, prev3_wd) && \
+ word_matches(p4, prev4_wd) && \
+ word_matches(p5, prev5_wd) && \
+ word_matches(p6, prev6_wd) && \
+ word_matches(p7, prev7_wd) && \
+ word_matches(p8, prev8_wd))
+
+ #define TailMatches9(p9, p8, p7, p6, p5, p4, p3, p2, p1) \
+ (previous_words_count >= 9 && \
+ word_matches(p1, prev_wd) && \
+ word_matches(p2, prev2_wd) && \
+ word_matches(p3, prev3_wd) && \
+ word_matches(p4, prev4_wd) && \
+ word_matches(p5, prev5_wd) && \
+ word_matches(p6, prev6_wd) && \
+ word_matches(p7, prev7_wd) && \
+ word_matches(p8, prev8_wd) && \
+ word_matches(p9, prev9_wd))
+
+ /* Macros for matching the last N words before point, case-sensitively. */
+ #define TailMatchesCS1(p1) \
+ (previous_words_count >= 1 && \
+ word_matches_cs(p1, prev_wd))
+ #define TailMatchesCS2(p2, p1) \
+ (previous_words_count >= 2 && \
+ word_matches_cs(p1, prev_wd) && \
+ word_matches_cs(p2, prev2_wd))
+
+ /*
+ * Macros for matching N words beginning at the start of the line,
+ * case-insensitively.
+ */
+ #define Matches1(p1) \
+ (previous_words_count == 1 && \
+ TailMatches1(p1))
+ #define Matches2(p1, p2) \
+ (previous_words_count == 2 && \
+ TailMatches2(p1, p2))
+ #define Matches3(p1, p2, p3) \
+ (previous_words_count == 3 && \
+ TailMatches3(p1, p2, p3))
+ #define Matches4(p1, p2, p3, p4) \
+ (previous_words_count == 4 && \
+ TailMatches4(p1, p2, p3, p4))
+ #define Matches5(p1, p2, p3, p4, p5) \
+ (previous_words_count == 5 && \
+ TailMatches5(p1, p2, p3, p4, p5))
+ #define Matches6(p1, p2, p3, p4, p5, p6) \
+ (previous_words_count == 6 && \
+ TailMatches6(p1, p2, p3, p4, p5, p6))
+ #define Matches7(p1, p2, p3, p4, p5, p6, p7) \
+ (previous_words_count == 7 && \
+ TailMatches7(p1, p2, p3, p4, p5, p6, p7))
+ #define Matches8(p1, p2, p3, p4, p5, p6, p7, p8) \
+ (previous_words_count == 8 && \
+ TailMatches8(p1, p2, p3, p4, p5, p6, p7, p8))
+ #define Matches9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \
+ (previous_words_count == 9 && \
+ TailMatches9(p1, p2, p3, p4, p5, p6, p7, p8, p9))
+ /*
+ * Macros for matching N words at the start of the line, regardless of
+ * what is after them, case-insensitively.
+ */
+ #define HeadMatches1(p1) \
+ (previous_words_count >= 1 && \
+ word_matches(p1, previous_words[previous_words_count - 1]))
+
+ #define HeadMatches2(p1, p2) \
+ (previous_words_count >= 2 && \
+ word_matches(p1, previous_words[previous_words_count - 1]) && \
+ word_matches(p2, previous_words[previous_words_count - 2]))
+
+ #define HeadMatches3(p1, p2, p3) \
+ (previous_words_count >= 3 && \
+ word_matches(p1, previous_words[previous_words_count - 1]) && \
+ word_matches(p2, previous_words[previous_words_count - 2]) && \
+ word_matches(p3, previous_words[previous_words_count - 3]))
+
+ /* Known command-starting keywords. */
static const char *const sql_commands[] = {
+#ifdef PGXC
+ /*
+ * Added "CLEAN" and "EXECUTE DIRECT"
+ * Removed LISTEN, NOTIFY, RELEASE, SAVEPOINT and UNLISTEN
+ */
+ "ABORT", "ALTER", "ANALYZE", "BEGIN", "CHECKPOINT", "CLEAN CONNECTION", "CLOSE", "CLUSTER",
+ "COMMENT", "COMMIT", "COPY", "CREATE", "DEALLOCATE", "DECLARE",
+ "DELETE FROM", "DISCARD", "DO", "DROP", "END", "EXECUTE", "EXECUTE DIRECT", "EXPLAIN", "FETCH",
+ "GRANT", "INSERT", "LOAD", "LOCK", "MOVE", "PREPARE",
+ "REASSIGN", "REINDEX", "RESET", "REVOKE", "ROLLBACK",
+ "SECURITY LABEL", "SELECT", "SET", "SHOW", "START",
+ "TABLE", "TRUNCATE", "UPDATE", "VACUUM", "VALUES", "WITH",
+#else
"ABORT", "ALTER", "ANALYZE", "BEGIN", "CHECKPOINT", "CLOSE", "CLUSTER",
"COMMENT", "COMMIT", "COPY", "CREATE", "DEALLOCATE", "DECLARE",
"DELETE FROM", "DISCARD", "DO", "DROP", "END", "EXECUTE", "EXPLAIN",
"FETCH", "GRANT", "IMPORT", "INSERT", "LISTEN", "LOAD", "LOCK",
"MOVE", "NOTIFY", "PREPARE",
- "REASSIGN", "REFRESH", "REINDEX", "RELEASE", "RESET", "REVOKE", "ROLLBACK",
+ "REASSIGN", "REFRESH MATERIALIZED VIEW", "REINDEX", "RELEASE",
+ "RESET", "REVOKE", "ROLLBACK",
"SAVEPOINT", "SECURITY LABEL", "SELECT", "SET", "SHOW", "START",
"TABLE", "TRUNCATE", "UNLISTEN", "UPDATE", "VACUUM", "VALUES", "WITH",
+#endif
NULL
};
/* ALTER */
/* ALTER TABLE */
- else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev_wd, "TABLE") == 0)
- {
+ else if (Matches2("ALTER", "TABLE"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables,
"UNION SELECT 'ALL IN TABLESPACE'");
- }
- /*
- * complete with what you can alter (TABLE, GROUP, USER, ...) unless we're
- * in ALTER TABLE sth ALTER
- */
- else if (pg_strcasecmp(prev_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "TABLE") != 0)
+ /* ALTER something */
+ else if (Matches1("ALTER"))
{
static const char *const list_ALTER[] =
+#ifdef PGXC
+ /*
+ * Added: "NODE" and "NODE GROUP" (a node's name cannot be altered).
+ * Removed: "EVENT TRIGGER", "FOREIGN DATA WRAPPER", "FOREIGN TABLE",
+ * "LARGE OBJECT", "MATERIALIZED VIEW", "SERVER", "TRIGGER",
+ * "USER MAPPING FOR".
+ */
+ {"AGGREGATE", "COLLATION", "CONVERSION", "DATABASE", "DEFAULT PRIVILEGES", "DOMAIN",
+ "EXTENSION", "FUNCTION",
+ "GROUP", "INDEX", "LANGUAGE", "NODE", "NODE GROUP", "OPERATOR",
+ "POLICY", "ROLE", "RULE", "SCHEMA", "SEQUENCE", "SYSTEM", "TABLE",
+ "TABLESPACE", "TEXT SEARCH", "TYPE",
+ "USER", "VIEW", NULL};
+#else
{"AGGREGATE", "COLLATION", "CONVERSION", "DATABASE", "DEFAULT PRIVILEGES", "DOMAIN",
"EVENT TRIGGER", "EXTENSION", "FOREIGN DATA WRAPPER", "FOREIGN TABLE", "FUNCTION",
"GROUP", "INDEX", "LANGUAGE", "LARGE OBJECT", "MATERIALIZED VIEW", "OPERATOR",
else
COMPLETE_WITH_FUNCTION_ARG(prev2_wd);
}
- else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev_wd, "NODE") == 0)
- {
+#ifdef PGXC
+ /* ALTER NODE */
- }
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "NODE") == 0)
- {
++ else if (Matches2("ALTER", "NODE"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_available_nodenames);
- }
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "NODE") == 0 &&
- pg_strcasecmp(prev_wd, "WITH") == 0)
- {
++ else if (Matches2("ALTER", "NODE"))
+ COMPLETE_WITH_CONST("WITH");
- }
- else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev4_wd, "NODE") == 0 &&
- pg_strcasecmp(prev2_wd, "WITH") == 0)
- {
- static const char *const list_NODEOPTIONS[] =
- {"TYPE", "HOST", "PORT", "PRIMARY", "PREFERRED", NULL};
++ else if (Matches3("ALTER", "NODE", "WITH"))
+ COMPLETE_WITH_CONST("(");
++ else if (Matches3("ALTER", "NODE", "WITH"))
- COMPLETE_WITH_LIST(list_NODEOPTIONS);
- }
++ COMPLETE_WITH_LIST5("TYPE", "HOST", "PORT", "PRIMARY", "PREFERRED");
+#endif
/* ALTER SCHEMA <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "SCHEMA") == 0)
- {
- static const char *const list_ALTERGEN[] =
- {"OWNER TO", "RENAME TO", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERGEN);
- }
+ else if (Matches3("ALTER", "SCHEMA", MatchAny))
+ COMPLETE_WITH_LIST2("OWNER TO", "RENAME TO");
/* ALTER COLLATION <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "COLLATION") == 0)
- {
- static const char *const list_ALTERGEN[] =
- {"OWNER TO", "RENAME TO", "SET SCHEMA", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERGEN);
- }
+ else if (Matches3("ALTER", "COLLATION", MatchAny))
+ COMPLETE_WITH_LIST3("OWNER TO", "RENAME TO", "SET SCHEMA");
/* ALTER CONVERSION <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "CONVERSION") == 0)
- {
- static const char *const list_ALTERGEN[] =
- {"OWNER TO", "RENAME TO", "SET SCHEMA", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERGEN);
- }
+ else if (Matches3("ALTER", "CONVERSION", MatchAny))
+ COMPLETE_WITH_LIST3("OWNER TO", "RENAME TO", "SET SCHEMA");
/* ALTER DATABASE <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "DATABASE") == 0)
- {
- static const char *const list_ALTERDATABASE[] =
- {"RESET", "SET", "OWNER TO", "RENAME TO", "IS_TEMPLATE",
- "ALLOW_CONNECTIONS", "CONNECTION LIMIT", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERDATABASE);
- }
+ else if (Matches3("ALTER", "DATABASE", MatchAny))
+ COMPLETE_WITH_LIST7("RESET", "SET", "OWNER TO", "RENAME TO",
+ "IS_TEMPLATE", "ALLOW_CONNECTIONS",
+ "CONNECTION LIMIT");
/* ALTER EVENT TRIGGER */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "EVENT") == 0 &&
- pg_strcasecmp(prev_wd, "TRIGGER") == 0)
- {
+ else if (Matches3("ALTER", "EVENT", "TRIGGER"))
COMPLETE_WITH_QUERY(Query_for_list_of_event_triggers);
- }
/* ALTER EVENT TRIGGER <name> */
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "EVENT") == 0 &&
- pg_strcasecmp(prev2_wd, "TRIGGER") == 0)
- {
- static const char *const list_ALTER_EVENT_TRIGGER[] =
- {"DISABLE", "ENABLE", "OWNER TO", "RENAME TO", NULL};
-
- COMPLETE_WITH_LIST(list_ALTER_EVENT_TRIGGER);
- }
+ else if (Matches4("ALTER", "EVENT", "TRIGGER", MatchAny))
+ COMPLETE_WITH_LIST4("DISABLE", "ENABLE", "OWNER TO", "RENAME TO");
/* ALTER EVENT TRIGGER <name> ENABLE */
- else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev4_wd, "EVENT") == 0 &&
- pg_strcasecmp(prev3_wd, "TRIGGER") == 0 &&
- pg_strcasecmp(prev_wd, "ENABLE") == 0)
- {
- static const char *const list_ALTER_EVENT_TRIGGER_ENABLE[] =
- {"REPLICA", "ALWAYS", NULL};
-
- COMPLETE_WITH_LIST(list_ALTER_EVENT_TRIGGER_ENABLE);
- }
+ else if (Matches5("ALTER", "EVENT", "TRIGGER", MatchAny, "ENABLE"))
+ COMPLETE_WITH_LIST2("REPLICA", "ALWAYS");
/* ALTER EXTENSION <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "EXTENSION") == 0)
- {
- static const char *const list_ALTEREXTENSION[] =
- {"ADD", "DROP", "UPDATE", "SET SCHEMA", NULL};
-
- COMPLETE_WITH_LIST(list_ALTEREXTENSION);
- }
+ else if (Matches3("ALTER", "EXTENSION", MatchAny))
+ COMPLETE_WITH_LIST4("ADD", "DROP", "UPDATE", "SET SCHEMA");
+#ifndef PGXC
+ /* PGXCTODO: This should be re-enabled once FOREIGN DATA WRAPPER is supported */
/* ALTER FOREIGN */
- else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev_wd, "FOREIGN") == 0)
- {
- static const char *const list_ALTER_FOREIGN[] =
- {"DATA WRAPPER", "TABLE", NULL};
-
- COMPLETE_WITH_LIST(list_ALTER_FOREIGN);
- }
+ else if (Matches2("ALTER", "FOREIGN"))
+ COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE");
/* ALTER FOREIGN DATA WRAPPER <name> */
- else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev4_wd, "FOREIGN") == 0 &&
- pg_strcasecmp(prev3_wd, "DATA") == 0 &&
- pg_strcasecmp(prev2_wd, "WRAPPER") == 0)
- {
- static const char *const list_ALTER_FDW[] =
- {"HANDLER", "VALIDATOR", "OPTIONS", "OWNER TO", NULL};
-
- COMPLETE_WITH_LIST(list_ALTER_FDW);
- }
+ else if (Matches5("ALTER", "FOREIGN", "DATA", "WRAPPER", MatchAny))
+ COMPLETE_WITH_LIST5("HANDLER", "VALIDATOR", "OPTIONS", "OWNER TO", "RENAME TO");
/* ALTER FOREIGN TABLE <name> */
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "FOREIGN") == 0 &&
- pg_strcasecmp(prev2_wd, "TABLE") == 0)
+ else if (Matches4("ALTER", "FOREIGN", "TABLE", MatchAny))
{
static const char *const list_ALTER_FOREIGN_TABLE[] =
{"ADD", "ALTER", "DISABLE TRIGGER", "DROP", "ENABLE", "INHERIT",
COMPLETE_WITH_LIST(list_ALTER_FOREIGN_TABLE);
}
+#endif
/* ALTER INDEX */
- else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev_wd, "INDEX") == 0)
- {
+ else if (Matches2("ALTER", "INDEX"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes,
"UNION SELECT 'ALL IN TABLESPACE'");
- }
/* ALTER INDEX <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "INDEX") == 0)
- {
- static const char *const list_ALTERINDEX[] =
- {"OWNER TO", "RENAME TO", "SET", "RESET", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERINDEX);
- }
+ else if (Matches3("ALTER", "INDEX", MatchAny))
+ COMPLETE_WITH_LIST4("OWNER TO", "RENAME TO", "SET", "RESET");
/* ALTER INDEX <name> SET */
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "INDEX") == 0 &&
- pg_strcasecmp(prev_wd, "SET") == 0)
- {
- static const char *const list_ALTERINDEXSET[] =
- {"(", "TABLESPACE", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERINDEXSET);
- }
+ else if (Matches4("ALTER", "INDEX", MatchAny, "SET"))
+ COMPLETE_WITH_LIST2("(", "TABLESPACE");
/* ALTER INDEX <name> RESET */
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "INDEX") == 0 &&
- pg_strcasecmp(prev_wd, "RESET") == 0)
+ else if (Matches4("ALTER", "INDEX", MatchAny, "RESET"))
COMPLETE_WITH_CONST("(");
/* ALTER INDEX <foo> SET|RESET ( */
- else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev4_wd, "INDEX") == 0 &&
- (pg_strcasecmp(prev2_wd, "SET") == 0 ||
- pg_strcasecmp(prev2_wd, "RESET") == 0) &&
- pg_strcasecmp(prev_wd, "(") == 0)
- {
- static const char *const list_INDEXOPTIONS[] =
- {"fillfactor", "fastupdate", "gin_pending_list_limit", NULL};
-
- COMPLETE_WITH_LIST(list_INDEXOPTIONS);
- }
+ else if (Matches5("ALTER", "INDEX", MatchAny, "RESET", "("))
+ COMPLETE_WITH_LIST3("fillfactor", "fastupdate",
+ "gin_pending_list_limit");
+ else if (Matches5("ALTER", "INDEX", MatchAny, "SET", "("))
+ COMPLETE_WITH_LIST3("fillfactor =", "fastupdate =",
+ "gin_pending_list_limit =");
/* ALTER LANGUAGE <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "LANGUAGE") == 0)
- {
- static const char *const list_ALTERLANGUAGE[] =
- {"OWNER TO", "RENAME TO", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERLANGUAGE);
- }
+ else if (Matches3("ALTER", "LANGUAGE", MatchAny))
+ COMPLETE_WITH_LIST2("OWNER_TO", "RENAME TO");
/* ALTER LARGE OBJECT <oid> */
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "LARGE") == 0 &&
- pg_strcasecmp(prev2_wd, "OBJECT") == 0)
- {
- static const char *const list_ALTERLARGEOBJECT[] =
- {"OWNER TO", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERLARGEOBJECT);
- }
+ else if (Matches4("ALTER", "LARGE", "OBJECT", MatchAny))
+ COMPLETE_WITH_CONST("OWNER TO");
/* ALTER MATERIALIZED VIEW */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "MATERIALIZED") == 0 &&
- pg_strcasecmp(prev_wd, "VIEW") == 0)
- {
+ else if (Matches3("ALTER", "MATERIALIZED", "VIEW"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews,
"UNION SELECT 'ALL IN TABLESPACE'");
- }
/* ALTER USER,ROLE <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- !(pg_strcasecmp(prev2_wd, "USER") == 0 && pg_strcasecmp(prev_wd, "MAPPING") == 0) &&
- (pg_strcasecmp(prev2_wd, "USER") == 0 ||
- pg_strcasecmp(prev2_wd, "ROLE") == 0))
+ else if (Matches3("ALTER", "USER|ROLE", MatchAny) &&
+ !TailMatches2("USER", "MAPPING"))
{
static const char *const list_ALTERUSER[] =
{"BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE",
COMPLETE_WITH_LIST(list_ALTERSEQUENCE);
}
/* ALTER SEQUENCE <name> NO */
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "SEQUENCE") == 0 &&
- pg_strcasecmp(prev_wd, "NO") == 0)
- {
- static const char *const list_ALTERSEQUENCE2[] =
- {"MINVALUE", "MAXVALUE", "CYCLE", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERSEQUENCE2);
- }
+ else if (Matches4("ALTER", "SEQUENCE", MatchAny, "NO"))
+ COMPLETE_WITH_LIST3("MINVALUE", "MAXVALUE", "CYCLE");
+#ifndef PGXC
+ /* PGXCTODO: This should be re-enabled once SERVER is supported */
/* ALTER SERVER <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "SERVER") == 0)
- {
- static const char *const list_ALTER_SERVER[] =
- {"VERSION", "OPTIONS", "OWNER TO", NULL};
-
- COMPLETE_WITH_LIST(list_ALTER_SERVER);
- }
+ else if (Matches3("ALTER", "SERVER", MatchAny))
+ COMPLETE_WITH_LIST4("VERSION", "OPTIONS", "OWNER TO", "RENAME TO");
+ /* ALTER SERVER <name> VERSION <version> */
+ else if (Matches5("ALTER", "SERVER", MatchAny, "VERSION", MatchAny))
+ COMPLETE_WITH_CONST("OPTIONS");
+#endif
/* ALTER SYSTEM SET, RESET, RESET ALL */
- else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev_wd, "SYSTEM") == 0)
- {
- static const char *const list_ALTERSYSTEM[] =
- {"SET", "RESET", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERSYSTEM);
- }
+ else if (Matches2("ALTER", "SYSTEM"))
+ COMPLETE_WITH_LIST2("SET", "RESET");
/* ALTER SYSTEM SET|RESET <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "SYSTEM") == 0 &&
- (pg_strcasecmp(prev_wd, "SET") == 0 ||
- pg_strcasecmp(prev_wd, "RESET") == 0))
+ else if (Matches3("ALTER", "SYSTEM", "SET|RESET"))
COMPLETE_WITH_QUERY(Query_for_list_of_alter_system_set_vars);
/* ALTER VIEW <name> */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "VIEW") == 0)
- {
- static const char *const list_ALTERVIEW[] =
- {"ALTER COLUMN", "OWNER TO", "RENAME TO", "SET SCHEMA", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERVIEW);
- }
+ else if (Matches3("ALTER", "VIEW", MatchAny))
+ COMPLETE_WITH_LIST4("ALTER COLUMN", "OWNER TO", "RENAME TO",
+ "SET SCHEMA");
/* ALTER MATERIALIZED VIEW <name> */
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "MATERIALIZED") == 0 &&
- pg_strcasecmp(prev2_wd, "VIEW") == 0)
- {
- static const char *const list_ALTERMATVIEW[] =
- {"ALTER COLUMN", "OWNER TO", "RENAME TO", "SET SCHEMA", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERMATVIEW);
- }
+ else if (Matches4("ALTER", "MATERIALIZED", "VIEW", MatchAny))
+ COMPLETE_WITH_LIST4("ALTER COLUMN", "OWNER TO", "RENAME TO",
+ "SET SCHEMA");
+ /* ALTER POLICY <name> */
+ else if (Matches2("ALTER", "POLICY"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_policies);
/* ALTER POLICY <name> ON */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "POLICY") == 0)
+ else if (Matches3("ALTER", "POLICY", MatchAny))
COMPLETE_WITH_CONST("ON");
/* ALTER POLICY <name> ON <table> */
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev_wd, "ON") == 0)
- COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
- /* ALTER POLICY <name> ON <table> - show options */
- else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev4_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev2_wd, "ON") == 0)
+ else if (Matches4("ALTER", "POLICY", MatchAny, "ON"))
{
- static const char *const list_ALTERPOLICY[] =
- {"RENAME TO", "TO", "USING", "WITH CHECK", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERPOLICY);
+ completion_info_charp = prev2_wd;
+ COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_policy);
}
+ /* ALTER POLICY <name> ON <table> - show options */
+ else if (Matches5("ALTER", "POLICY", MatchAny, "ON", MatchAny))
+ COMPLETE_WITH_LIST4("RENAME TO", "TO", "USING (", "WITH CHECK (");
/* ALTER POLICY <name> ON <table> TO <role> */
- else if (pg_strcasecmp(prev6_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev3_wd, "ON") == 0 &&
- pg_strcasecmp(prev_wd, "TO") == 0)
+ else if (Matches6("ALTER", "POLICY", MatchAny, "ON", MatchAny, "TO"))
COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles);
/* ALTER POLICY <name> ON <table> USING ( */
- else if (pg_strcasecmp(prev6_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev3_wd, "ON") == 0 &&
- pg_strcasecmp(prev_wd, "USING") == 0)
+ else if (Matches6("ALTER", "POLICY", MatchAny, "ON", MatchAny, "USING"))
COMPLETE_WITH_CONST("(");
/* ALTER POLICY <name> ON <table> WITH CHECK ( */
- else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev4_wd, "ON") == 0 &&
- pg_strcasecmp(prev2_wd, "WITH") == 0 &&
- pg_strcasecmp(prev_wd, "CHECK") == 0)
+ else if (Matches7("ALTER", "POLICY", MatchAny, "ON", MatchAny, "WITH", "CHECK"))
COMPLETE_WITH_CONST("(");
/* ALTER RULE <name>, add ON */
}
/* ALTER RULE <name> ON <name> */
- else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev4_wd, "RULE") == 0)
+ else if (Matches5("ALTER", "RULE", MatchAny, "ON", MatchAny))
COMPLETE_WITH_CONST("RENAME TO");
+#ifndef PGXC
+ /* PGXCTODO: This should be re-enabled once TRIGGER is supported */
/* ALTER TRIGGER <name>, add ON */
- else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev2_wd, "TRIGGER") == 0)
+ else if (Matches3("ALTER", "TRIGGER", MatchAny))
COMPLETE_WITH_CONST("ON");
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev3_wd, "TRIGGER") == 0)
+ else if (Matches4("ALTER", "TRIGGER", MatchAny, MatchAny))
{
completion_info_charp = prev2_wd;
COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_trigger);
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
/* ALTER TRIGGER <name> ON <name> */
- else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
- pg_strcasecmp(prev4_wd, "TRIGGER") == 0 &&
- pg_strcasecmp(prev2_wd, "ON") == 0)
+ else if (Matches5("ALTER", "TRIGGER", MatchAny, "ON", MatchAny))
COMPLETE_WITH_CONST("RENAME TO");
+#endif
/*
* If we detect ALTER TABLE <name>, suggest sub commands
COMPLETE_WITH_QUERY(Query_for_list_of_roles);
/* BEGIN, END, ABORT */
- else if (pg_strcasecmp(prev_wd, "BEGIN") == 0 ||
- pg_strcasecmp(prev_wd, "END") == 0 ||
- pg_strcasecmp(prev_wd, "ABORT") == 0)
- {
- static const char *const list_TRANS[] =
- {"WORK", "TRANSACTION", NULL};
-
- COMPLETE_WITH_LIST(list_TRANS);
- }
+ else if (Matches1("BEGIN|END|ABORT"))
+ COMPLETE_WITH_LIST2("WORK", "TRANSACTION");
/* COMMIT */
- else if (pg_strcasecmp(prev_wd, "COMMIT") == 0)
- {
- static const char *const list_COMMIT[] =
- {"WORK", "TRANSACTION", "PREPARED", NULL};
-
- COMPLETE_WITH_LIST(list_COMMIT);
- }
+ else if (Matches1("COMMIT"))
+ COMPLETE_WITH_LIST3("WORK", "TRANSACTION", "PREPARED");
/* RELEASE SAVEPOINT */
- else if (pg_strcasecmp(prev_wd, "RELEASE") == 0)
+ else if (Matches1("RELEASE"))
COMPLETE_WITH_CONST("SAVEPOINT");
- /* ROLLBACK*/
- else if (pg_strcasecmp(prev_wd, "ROLLBACK") == 0)
- {
- static const char *const list_TRANS[] =
- {"WORK", "TRANSACTION", "TO SAVEPOINT", "PREPARED", NULL};
-
- COMPLETE_WITH_LIST(list_TRANS);
- }
+#ifdef PGXC
+/* CLEAN CONNECTION */
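+/* Syntax sketch: CLEAN CONNECTION TO { ALL [FORCE] | COORDINATOR ... | NODE ... } [FOR DATABASE ...] [TO USER ...] */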
- else if (pg_strcasecmp(prev2_wd, "CLEAN") == 0 &&
- pg_strcasecmp(prev_wd, "CONNECTION") == 0)
++ else if (Matches2("CLEAN", "CONNECTION"))
+ COMPLETE_WITH_CONST("TO");
- else if (pg_strcasecmp(prev3_wd, "CLEAN") == 0 &&
- pg_strcasecmp(prev2_wd, "CONNECTION") == 0 &&
- pg_strcasecmp(prev_wd, "TO") == 0)
++ else if (Matches3("CLEAN", "CONNECTION", "TO"))
+ /* CLEAN CONNECTION TO */
- {
- static const char *const list_CLEANCONNECTIONOPT[] =
- {"ALL", "COORDINATOR", "NODE", NULL};
-
- COMPLETE_WITH_LIST(list_CLEANCONNECTIONOPT);
- }
- else if (pg_strcasecmp(prev4_wd, "CLEAN") == 0 &&
- pg_strcasecmp(prev3_wd, "CONNECTION") == 0 &&
- pg_strcasecmp(prev2_wd, "TO") == 0 &&
- pg_strcasecmp(prev_wd, "ALL") == 0)
++ COMPLETE_WITH_LIST3("ALL", "COORDINATOR", "NODE");
++ else if (Matches4("CLEAN", "CONNECTION", "TO", "ALL"))
+ COMPLETE_WITH_CONST("FORCE");
- else if (pg_strcasecmp(prev4_wd, "CLEAN") == 0 &&
- pg_strcasecmp(prev3_wd, "CONNECTION") == 0 &&
- pg_strcasecmp(prev2_wd, "TO") == 0 &&
- pg_strcasecmp(prev_wd, "COORDINATOR") == 0)
++ else if (Matches4("CLEAN", "CONNECTION", "TO", "COORDINATOR"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_available_coordinators);
- else if (pg_strcasecmp(prev4_wd, "CLEAN") == 0 &&
- pg_strcasecmp(prev3_wd, "CONNECTION") == 0 &&
- pg_strcasecmp(prev2_wd, "TO") == 0 &&
- pg_strcasecmp(prev_wd, "NODE") == 0)
++ else if (Matches4("CLEAN", "CONNECTION", "TO", "NODE"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_available_datanodes);
- else if (pg_strcasecmp(prev2_wd, "TO") == 0 &&
- pg_strcasecmp(prev_wd, "USER") == 0)
++ else if (Matches2("TO", "USER"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_roles);
- else if (pg_strcasecmp(prev2_wd, "FOR") == 0 &&
- pg_strcasecmp(prev_wd, "DATABASE") == 0)
++ else if (Matches2("FOR", "DATABASE"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_databases);
+#endif
+ /* ROLLBACK */
+ else if (Matches1("ROLLBACK"))
+ COMPLETE_WITH_LIST4("WORK", "TRANSACTION", "TO SAVEPOINT", "PREPARED");
/* CLUSTER */
-
- /*
- * If the previous word is CLUSTER and not WITHOUT produce list of tables
- */
- else if (pg_strcasecmp(prev_wd, "CLUSTER") == 0 &&
- pg_strcasecmp(prev2_wd, "WITHOUT") != 0)
+ else if (Matches1("CLUSTER"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, "UNION SELECT 'VERBOSE'");
-
- /*
- * If the previous words are CLUSTER VERBOSE produce list of tables
- */
- else if (pg_strcasecmp(prev_wd, "VERBOSE") == 0 &&
- pg_strcasecmp(prev2_wd, "CLUSTER") == 0)
+ else if (Matches2("CLUSTER", "VERBOSE"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL);
-
/* If we have CLUSTER <sth>, then add "USING" */
- else if (pg_strcasecmp(prev2_wd, "CLUSTER") == 0 &&
- pg_strcasecmp(prev_wd, "ON") != 0 &&
- pg_strcasecmp(prev_wd, "VERBOSE") != 0)
- {
+ else if (Matches2("CLUSTER", MatchAnyExcept("VERBOSE|ON")))
COMPLETE_WITH_CONST("USING");
- }
/* If we have CLUSTER VERBOSE <sth>, then add "USING" */
- else if (pg_strcasecmp(prev3_wd, "CLUSTER") == 0 &&
- pg_strcasecmp(prev2_wd, "VERBOSE") == 0)
- {
+ else if (Matches3("CLUSTER", "VERBOSE", MatchAny))
COMPLETE_WITH_CONST("USING");
- }
-
- /*
- * If we have CLUSTER <sth> USING, then add the index as well.
- */
- else if (pg_strcasecmp(prev3_wd, "CLUSTER") == 0 &&
- pg_strcasecmp(prev_wd, "USING") == 0)
- {
- completion_info_charp = prev2_wd;
- COMPLETE_WITH_QUERY(Query_for_index_of_table);
- }
-
- /*
- * If we have CLUSTER VERBOSE <sth> USING, then add the index as well.
- */
- else if (pg_strcasecmp(prev4_wd, "CLUSTER") == 0 &&
- pg_strcasecmp(prev3_wd, "VERBOSE") == 0 &&
- pg_strcasecmp(prev_wd, "USING") == 0)
+ /* If we have CLUSTER <sth> USING, then add the index as well */
+ else if (Matches3("CLUSTER", MatchAny, "USING") ||
+ Matches4("CLUSTER", "VERBOSE", MatchAny, "USING"))
{
completion_info_charp = prev2_wd;
COMPLETE_WITH_QUERY(Query_for_index_of_table);
* Complete INDEX <name> ON <table> with a list of table columns (which
* should really be in parens)
*/
- else if ((pg_strcasecmp(prev4_wd, "INDEX") == 0 ||
- pg_strcasecmp(prev3_wd, "INDEX") == 0 ||
- pg_strcasecmp(prev3_wd, "CONCURRENTLY") == 0) &&
- pg_strcasecmp(prev2_wd, "ON") == 0)
- {
- static const char *const list_CREATE_INDEX2[] =
- {"(", "USING", NULL};
-
- COMPLETE_WITH_LIST(list_CREATE_INDEX2);
- }
- else if ((pg_strcasecmp(prev5_wd, "INDEX") == 0 ||
- pg_strcasecmp(prev4_wd, "INDEX") == 0 ||
- pg_strcasecmp(prev4_wd, "CONCURRENTLY") == 0) &&
- pg_strcasecmp(prev3_wd, "ON") == 0 &&
- pg_strcasecmp(prev_wd, "(") == 0)
+ else if (TailMatches4("INDEX", MatchAny, "ON", MatchAny) ||
+ TailMatches3("INDEX|CONCURRENTLY", "ON", MatchAny))
+ COMPLETE_WITH_LIST2("(", "USING");
+ else if (TailMatches5("INDEX", MatchAny, "ON", MatchAny, "(") ||
+ TailMatches4("INDEX|CONCURRENTLY", "ON", MatchAny, "("))
COMPLETE_WITH_ATTR(prev2_wd, "");
/* same if you put in USING */
- else if (pg_strcasecmp(prev5_wd, "ON") == 0 &&
- pg_strcasecmp(prev3_wd, "USING") == 0 &&
- pg_strcasecmp(prev_wd, "(") == 0)
+ else if (TailMatches5("ON", MatchAny, "USING", MatchAny, "("))
COMPLETE_WITH_ATTR(prev4_wd, "");
/* Complete USING with an index method */
- else if ((pg_strcasecmp(prev6_wd, "INDEX") == 0 ||
- pg_strcasecmp(prev5_wd, "INDEX") == 0 ||
- pg_strcasecmp(prev4_wd, "INDEX") == 0) &&
- pg_strcasecmp(prev3_wd, "ON") == 0 &&
- pg_strcasecmp(prev_wd, "USING") == 0)
+ else if (TailMatches6("INDEX", MatchAny, MatchAny, "ON", MatchAny, "USING") ||
+ TailMatches5("INDEX", MatchAny, "ON", MatchAny, "USING") ||
+ TailMatches4("INDEX", "ON", MatchAny, "USING"))
COMPLETE_WITH_QUERY(Query_for_list_of_access_methods);
- else if (pg_strcasecmp(prev4_wd, "ON") == 0 &&
- (!(pg_strcasecmp(prev6_wd, "POLICY") == 0) &&
- !(pg_strcasecmp(prev4_wd, "FOR") == 0)) &&
- pg_strcasecmp(prev2_wd, "USING") == 0)
+ else if (TailMatches4("ON", MatchAny, "USING", MatchAny) &&
+ !TailMatches6("POLICY", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny) &&
+ !TailMatches4("FOR", MatchAny, MatchAny, MatchAny))
COMPLETE_WITH_CONST("(");
-
+#ifdef PGXC
+/* CREATE NODE */
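+/* e.g. CREATE NODE dn1 WITH (TYPE = 'datanode', HOST = 'localhost', PORT = 15432) */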
- else if (pg_strcasecmp(prev3_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev2_wd, "NODE") == 0)
++ else if (Matches2("CREATE", "NODE"))
+ COMPLETE_WITH_CONST("WITH");
- else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev3_wd, "NODE") == 0 &&
- pg_strcasecmp(prev_wd, "WITH") == 0)
++ else if (Matches3("CREATE", "NODE", "WITH"))
+ COMPLETE_WITH_CONST("(");
- else if (pg_strcasecmp(prev5_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev4_wd, "NODE") == 0 &&
- pg_strcasecmp(prev2_wd, "WITH") == 0)
- {
- static const char *const list_NODEOPT[] =
- {"TYPE", "HOST", "PORT", "PRIMARY", "PREFERRED", NULL};
-
- COMPLETE_WITH_LIST(list_NODEOPT);
- }
++ else if (Matches4("CREATE", "NODE", "WITH", "("))
++ COMPLETE_WITH_LIST5("TYPE", "HOST", "PORT", "PRIMARY", "PREFERRED");
+/* CREATE NODEGROUP */
- else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev3_wd, "NODE") == 0 &&
- pg_strcasecmp(prev2_wd, "GROUP") == 0)
++ else if (Matches3("CREATE", "NODE", "GROUP"))
+ COMPLETE_WITH_CONST("WITH");
+#endif
/* CREATE POLICY */
/* Complete "CREATE POLICY <name> ON" */
- else if (pg_strcasecmp(prev3_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev2_wd, "POLICY") == 0)
+ else if (Matches3("CREATE", "POLICY", MatchAny))
COMPLETE_WITH_CONST("ON");
/* Complete "CREATE POLICY <name> ON <table>" */
- else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev3_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev_wd, "ON") == 0)
+ else if (Matches4("CREATE", "POLICY", MatchAny, "ON"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
/* Complete "CREATE POLICY <name> ON <table> FOR|TO|USING|WITH CHECK" */
- else if (pg_strcasecmp(prev5_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev4_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev2_wd, "ON") == 0)
- {
- static const char *const list_POLICYOPTIONS[] =
- {"FOR", "TO", "USING", "WITH CHECK", NULL};
-
- COMPLETE_WITH_LIST(list_POLICYOPTIONS);
- }
-
- /*
- * Complete "CREATE POLICY <name> ON <table> FOR
- * ALL|SELECT|INSERT|UPDATE|DELETE"
- */
- else if (pg_strcasecmp(prev6_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev3_wd, "ON") == 0 &&
- pg_strcasecmp(prev_wd, "FOR") == 0)
- {
- static const char *const list_POLICYCMDS[] =
- {"ALL", "SELECT", "INSERT", "UPDATE", "DELETE", NULL};
-
- COMPLETE_WITH_LIST(list_POLICYCMDS);
- }
+ else if (Matches5("CREATE", "POLICY", MatchAny, "ON", MatchAny))
+ COMPLETE_WITH_LIST4("FOR", "TO", "USING (", "WITH CHECK (");
+ /* CREATE POLICY <name> ON <table> FOR ALL|SELECT|INSERT|UPDATE|DELETE */
+ else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR"))
+ COMPLETE_WITH_LIST5("ALL", "SELECT", "INSERT", "UPDATE", "DELETE");
/* Complete "CREATE POLICY <name> ON <table> FOR INSERT TO|WITH CHECK" */
- else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev4_wd, "ON") == 0 &&
- pg_strcasecmp(prev2_wd, "FOR") == 0 &&
- pg_strcasecmp(prev_wd, "INSERT") == 0)
- {
- static const char *const list_POLICYOPTIONS[] =
- {"TO", "WITH CHECK", NULL};
-
- COMPLETE_WITH_LIST(list_POLICYOPTIONS);
- }
-
- /*
- * Complete "CREATE POLICY <name> ON <table> FOR SELECT TO|USING" Complete
- * "CREATE POLICY <name> ON <table> FOR DELETE TO|USING"
- */
- else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev4_wd, "ON") == 0 &&
- pg_strcasecmp(prev2_wd, "FOR") == 0 &&
- (pg_strcasecmp(prev_wd, "SELECT") == 0 ||
- pg_strcasecmp(prev_wd, "DELETE") == 0))
- {
- static const char *const list_POLICYOPTIONS[] =
- {"TO", "USING", NULL};
-
- COMPLETE_WITH_LIST(list_POLICYOPTIONS);
- }
-
- /*
- * Complete "CREATE POLICY <name> ON <table> FOR ALL TO|USING|WITH CHECK"
- * Complete "CREATE POLICY <name> ON <table> FOR UPDATE TO|USING|WITH
- * CHECK"
- */
- else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev4_wd, "ON") == 0 &&
- pg_strcasecmp(prev2_wd, "FOR") == 0 &&
- (pg_strcasecmp(prev_wd, "ALL") == 0 ||
- pg_strcasecmp(prev_wd, "UPDATE") == 0))
- {
- static const char *const list_POLICYOPTIONS[] =
- {"TO", "USING", "WITH CHECK", NULL};
-
- COMPLETE_WITH_LIST(list_POLICYOPTIONS);
- }
+ else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "INSERT"))
+ COMPLETE_WITH_LIST2("TO", "WITH CHECK (");
+ /* Complete "CREATE POLICY <name> ON <table> FOR SELECT|DELETE TO|USING" */
+ else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "SELECT|DELETE"))
+ COMPLETE_WITH_LIST2("TO", "USING (");
+ /* CREATE POLICY <name> ON <table> FOR ALL|UPDATE TO|USING|WITH CHECK */
+ else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "ALL|UPDATE"))
+ COMPLETE_WITH_LIST3("TO", "USING (", "WITH CHECK (");
/* Complete "CREATE POLICY <name> ON <table> TO <role>" */
- else if (pg_strcasecmp(prev6_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev3_wd, "ON") == 0 &&
- pg_strcasecmp(prev_wd, "TO") == 0)
+ else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "TO"))
COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles);
/* Complete "CREATE POLICY <name> ON <table> USING (" */
- else if (pg_strcasecmp(prev6_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
- pg_strcasecmp(prev3_wd, "ON") == 0 &&
- pg_strcasecmp(prev_wd, "USING") == 0)
+ else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "USING"))
COMPLETE_WITH_CONST("(");
/* CREATE RULE */
* complete CREATE TRIGGER <name> BEFORE,AFTER event ON with a list of
* tables
*/
- else if (pg_strcasecmp(prev5_wd, "TRIGGER") == 0 &&
- (pg_strcasecmp(prev3_wd, "BEFORE") == 0 ||
- pg_strcasecmp(prev3_wd, "AFTER") == 0) &&
- pg_strcasecmp(prev_wd, "ON") == 0)
+ else if (TailMatches6("CREATE", "TRIGGER", MatchAny, "BEFORE|AFTER", MatchAny, "ON"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
/* complete CREATE TRIGGER ... INSTEAD OF event ON with a list of views */
- else if (pg_strcasecmp(prev4_wd, "INSTEAD") == 0 &&
- pg_strcasecmp(prev3_wd, "OF") == 0 &&
- pg_strcasecmp(prev_wd, "ON") == 0)
+ else if (TailMatches7("CREATE", "TRIGGER", MatchAny, "INSTEAD", "OF", MatchAny, "ON"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_views, NULL);
/* complete CREATE TRIGGER ... EXECUTE with PROCEDURE */
- else if (pg_strcasecmp(prev_wd, "EXECUTE") == 0 &&
- prev2_wd[0] != '\0')
+ else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches1("EXECUTE"))
COMPLETE_WITH_CONST("PROCEDURE");
+#endif
/* CREATE ROLE,USER,GROUP <name> */
- else if (pg_strcasecmp(prev3_wd, "CREATE") == 0 &&
- !(pg_strcasecmp(prev2_wd, "USER") == 0 && pg_strcasecmp(prev_wd, "MAPPING") == 0) &&
- (pg_strcasecmp(prev2_wd, "ROLE") == 0 ||
- pg_strcasecmp(prev2_wd, "GROUP") == 0 || pg_strcasecmp(prev2_wd, "USER") == 0))
+ else if (Matches3("CREATE", "ROLE|GROUP|USER", MatchAny) &&
+ !TailMatches2("USER", "MAPPING"))
{
static const char *const list_CREATEROLE[] =
{"ADMIN", "BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE",
/* XXX: implement tab completion for DELETE ... USING */
/* DISCARD */
- else if (pg_strcasecmp(prev_wd, "DISCARD") == 0)
- {
- static const char *const list_DISCARD[] =
- {"ALL", "PLANS", "SEQUENCES", "TEMP", NULL};
-
- COMPLETE_WITH_LIST(list_DISCARD);
- }
+ else if (Matches1("DISCARD"))
+ COMPLETE_WITH_LIST4("ALL", "PLANS", "SEQUENCES", "TEMP");
/* DO */
-
- /*
- * Complete DO with LANGUAGE.
- */
- else if (pg_strcasecmp(prev_wd, "DO") == 0)
- {
- static const char *const list_DO[] =
- {"LANGUAGE", NULL};
-
- COMPLETE_WITH_LIST(list_DO);
- }
-
- /* DROP (when not the previous word) */
- /* DROP AGGREGATE */
- else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
- pg_strcasecmp(prev2_wd, "AGGREGATE") == 0)
+ else if (Matches1("DO"))
+ COMPLETE_WITH_CONST("LANGUAGE");
+
+ /* DROP */
+ /* Complete DROP object with CASCADE / RESTRICT */
+ else if (Matches3("DROP",
+ "COLLATION|CONVERSION|DOMAIN|EXTENSION|LANGUAGE|SCHEMA|SEQUENCE|SERVER|TABLE|TYPE|VIEW",
+ MatchAny) ||
+ Matches4("DROP", "ACCESS", "METHOD", MatchAny) ||
+ (Matches4("DROP", "AGGREGATE|FUNCTION", MatchAny, MatchAny) &&
+ ends_with(prev_wd, ')')) ||
+ Matches4("DROP", "EVENT", "TRIGGER", MatchAny) ||
+ Matches5("DROP", "FOREIGN", "DATA", "WRAPPER", MatchAny) ||
+ Matches4("DROP", "FOREIGN", "TABLE", MatchAny) ||
+ Matches5("DROP", "TEXT", "SEARCH", "CONFIGURATION|DICTIONARY|PARSER|TEMPLATE", MatchAny))
+ COMPLETE_WITH_LIST2("CASCADE", "RESTRICT");
+
+ /* help completing some of the variants */
+ else if (Matches3("DROP", "AGGREGATE|FUNCTION", MatchAny))
COMPLETE_WITH_CONST("(");
+ else if (Matches4("DROP", "AGGREGATE|FUNCTION", MatchAny, "("))
+ COMPLETE_WITH_FUNCTION_ARG(prev2_wd);
+ else if (Matches2("DROP", "FOREIGN"))
+ COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE");
- /* DROP object with CASCADE / RESTRICT */
- else if ((pg_strcasecmp(prev3_wd, "DROP") == 0 &&
- (pg_strcasecmp(prev2_wd, "COLLATION") == 0 ||
- pg_strcasecmp(prev2_wd, "CONVERSION") == 0 ||
- pg_strcasecmp(prev2_wd, "DOMAIN") == 0 ||
- pg_strcasecmp(prev2_wd, "EXTENSION") == 0 ||
- pg_strcasecmp(prev2_wd, "FUNCTION") == 0 ||
- pg_strcasecmp(prev2_wd, "INDEX") == 0 ||
- pg_strcasecmp(prev2_wd, "LANGUAGE") == 0 ||
- pg_strcasecmp(prev2_wd, "SCHEMA") == 0 ||
- pg_strcasecmp(prev2_wd, "SEQUENCE") == 0 ||
- pg_strcasecmp(prev2_wd, "SERVER") == 0 ||
- pg_strcasecmp(prev2_wd, "TABLE") == 0 ||
- pg_strcasecmp(prev2_wd, "TYPE") == 0 ||
- pg_strcasecmp(prev2_wd, "VIEW") == 0)) ||
- (pg_strcasecmp(prev4_wd, "DROP") == 0 &&
- pg_strcasecmp(prev3_wd, "AGGREGATE") == 0 &&
- prev_wd[strlen(prev_wd) - 1] == ')') ||
- (pg_strcasecmp(prev4_wd, "DROP") == 0 &&
- pg_strcasecmp(prev3_wd, "EVENT") == 0 &&
- pg_strcasecmp(prev2_wd, "TRIGGER") == 0) ||
- #ifndef PGXC
- /* PGXCTODO: This should be re-enabled once FOREIGN DATA WRAPPER is supported */
- (pg_strcasecmp(prev5_wd, "DROP") == 0 &&
- pg_strcasecmp(prev4_wd, "FOREIGN") == 0 &&
- pg_strcasecmp(prev3_wd, "DATA") == 0 &&
- pg_strcasecmp(prev2_wd, "WRAPPER") == 0) ||
- #endif
- (pg_strcasecmp(prev5_wd, "DROP") == 0 &&
- pg_strcasecmp(prev4_wd, "TEXT") == 0 &&
- pg_strcasecmp(prev3_wd, "SEARCH") == 0 &&
- (pg_strcasecmp(prev2_wd, "CONFIGURATION") == 0 ||
- pg_strcasecmp(prev2_wd, "DICTIONARY") == 0 ||
- pg_strcasecmp(prev2_wd, "PARSER") == 0 ||
- pg_strcasecmp(prev2_wd, "TEMPLATE") == 0))
- )
- {
- if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
- pg_strcasecmp(prev2_wd, "FUNCTION") == 0)
- {
- COMPLETE_WITH_CONST("(");
- }
- else
- {
- static const char *const list_DROPCR[] =
- {"CASCADE", "RESTRICT", NULL};
-
- COMPLETE_WITH_LIST(list_DROPCR);
- }
- }
- #ifndef PGXC
- /* PGXCTODO: This should be re-enabled once FOREIGN DATA WRAPPER is supported */
- else if (pg_strcasecmp(prev2_wd, "DROP") == 0 &&
- pg_strcasecmp(prev_wd, "FOREIGN") == 0)
- {
- static const char *const drop_CREATE_FOREIGN[] =
- {"DATA WRAPPER", "TABLE", NULL};
-
- COMPLETE_WITH_LIST(drop_CREATE_FOREIGN);
- }
- #endif
+ /* DROP INDEX */
+ else if (Matches2("DROP", "INDEX"))
+ COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes,
+ " UNION SELECT 'CONCURRENTLY'");
+ else if (Matches3("DROP", "INDEX", "CONCURRENTLY"))
+ COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL);
+ else if (Matches3("DROP", "INDEX", MatchAny))
+ COMPLETE_WITH_LIST2("CASCADE", "RESTRICT");
+ else if (Matches4("DROP", "INDEX", "CONCURRENTLY", MatchAny))
+ COMPLETE_WITH_LIST2("CASCADE", "RESTRICT");
/* DROP MATERIALIZED VIEW */
- else if (pg_strcasecmp(prev2_wd, "DROP") == 0 &&
- pg_strcasecmp(prev_wd, "MATERIALIZED") == 0)
- {
+ else if (Matches2("DROP", "MATERIALIZED"))
COMPLETE_WITH_CONST("VIEW");
- }
- else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
- pg_strcasecmp(prev2_wd, "MATERIALIZED") == 0 &&
- pg_strcasecmp(prev_wd, "VIEW") == 0)
- {
+ else if (Matches3("DROP", "MATERIALIZED", "VIEW"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews, NULL);
- }
- else if (pg_strcasecmp(prev4_wd, "DROP") == 0 &&
- (pg_strcasecmp(prev3_wd, "AGGREGATE") == 0 ||
- pg_strcasecmp(prev3_wd, "FUNCTION") == 0) &&
- pg_strcasecmp(prev_wd, "(") == 0)
- COMPLETE_WITH_FUNCTION_ARG(prev2_wd);
/* DROP OWNED BY */
- else if (pg_strcasecmp(prev2_wd, "DROP") == 0 &&
- pg_strcasecmp(prev_wd, "OWNED") == 0)
+ else if (Matches2("DROP", "OWNED"))
COMPLETE_WITH_CONST("BY");
- else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
- pg_strcasecmp(prev2_wd, "OWNED") == 0 &&
- pg_strcasecmp(prev_wd, "BY") == 0)
+ else if (Matches3("DROP", "OWNED", "BY"))
COMPLETE_WITH_QUERY(Query_for_list_of_roles);
- else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
- pg_strcasecmp(prev2_wd, "TEXT") == 0 &&
- pg_strcasecmp(prev_wd, "SEARCH") == 0)
- {
- static const char *const list_ALTERTEXTSEARCH[] =
- {"CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE", NULL};
-
- COMPLETE_WITH_LIST(list_ALTERTEXTSEARCH);
- }
+#ifdef PGXC
+ /* DROP NODE */
- else if (pg_strcasecmp(prev2_wd, "DROP") == 0 &&
- pg_strcasecmp(prev_wd, "NODE") == 0)
++ else if (Matches2("DROP", "NODE"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_available_nodenames); /* XXX: verify completion is not confused with DROP NODE GROUP */
+ /* DROP NODE GROUP */
- else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
- pg_strcasecmp(prev2_wd, "NODE") == 0 &&
- pg_strcasecmp(prev_wd, "GROUP") == 0)
++ else if (Matches3("DROP", "NODE", "GROUP"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_available_nodegroup_names);
+/* EXECUTE DIRECT */
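+/* EXECUTE DIRECT runs a statement on a named node; complete ON first, then node names */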
- else if (pg_strcasecmp(prev2_wd, "EXECUTE") == 0 &&
- pg_strcasecmp(prev_wd, "DIRECT") == 0)
++ else if (Matches2("EXECUTE", "DIRECT"))
+ COMPLETE_WITH_CONST("ON");
- else if (pg_strcasecmp(prev3_wd, "EXECUTE") == 0 &&
- pg_strcasecmp(prev2_wd, "DIRECT") == 0 &&
- pg_strcasecmp(prev_wd, "ON") == 0)
++ else if (Matches3("EXECUTE", "DIRECT", "ON"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_available_nodenames);
+#endif
+ else if (Matches3("DROP", "TEXT", "SEARCH"))
+ COMPLETE_WITH_LIST4("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE");
/* DROP TRIGGER */
- else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
- pg_strcasecmp(prev2_wd, "TRIGGER") == 0)
- {
+ else if (Matches3("DROP", "TRIGGER", MatchAny))
COMPLETE_WITH_CONST("ON");
- }
- else if (pg_strcasecmp(prev4_wd, "DROP") == 0 &&
- pg_strcasecmp(prev3_wd, "TRIGGER") == 0 &&
- pg_strcasecmp(prev_wd, "ON") == 0)
+ else if (Matches4("DROP", "TRIGGER", MatchAny, "ON"))
{
completion_info_charp = prev2_wd;
COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_trigger);
* but we may as well tab-complete both: perhaps some users prefer one
* variant or the other.
*/
- else if (pg_strcasecmp(prev3_wd, "FETCH") == 0 ||
- pg_strcasecmp(prev3_wd, "MOVE") == 0)
- {
- static const char *const list_FROMIN[] =
- {"FROM", "IN", NULL};
-
- COMPLETE_WITH_LIST(list_FROMIN);
- }
+ else if (Matches3("FETCH|MOVE", MatchAny, MatchAny))
+ COMPLETE_WITH_LIST2("FROM", "IN");
+#ifndef PGXC
+ /* PGXCTODO: This should be re-enabled once FOREIGN DATA WRAPPER is supported */
/* FOREIGN DATA WRAPPER */
/* applies in ALTER/DROP FDW and in CREATE SERVER */
- else if (pg_strcasecmp(prev4_wd, "CREATE") != 0 &&
- pg_strcasecmp(prev3_wd, "FOREIGN") == 0 &&
- pg_strcasecmp(prev2_wd, "DATA") == 0 &&
- pg_strcasecmp(prev_wd, "WRAPPER") == 0)
+ else if (TailMatches3("FOREIGN", "DATA", "WRAPPER") &&
+ !TailMatches4("CREATE", MatchAny, MatchAny, MatchAny))
COMPLETE_WITH_QUERY(Query_for_list_of_fdws);
+ /* applies in CREATE SERVER */
+ else if (TailMatches4("FOREIGN", "DATA", "WRAPPER", MatchAny) &&
+ HeadMatches2("CREATE", "SERVER"))
+ COMPLETE_WITH_CONST("OPTIONS");
/* FOREIGN TABLE */
- else if (pg_strcasecmp(prev3_wd, "CREATE") != 0 &&
- pg_strcasecmp(prev2_wd, "FOREIGN") == 0 &&
- pg_strcasecmp(prev_wd, "TABLE") == 0)
+ else if (TailMatches2("FOREIGN", "TABLE") &&
+ !TailMatches3("CREATE", MatchAny, MatchAny))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_foreign_tables, NULL);
+#endif
- /* GRANT && REVOKE */
+ /* FOREIGN SERVER */
+ else if (TailMatches2("FOREIGN", "SERVER"))
+ COMPLETE_WITH_QUERY(Query_for_list_of_servers);
+
+ /* GRANT && REVOKE --- is allowed inside CREATE SCHEMA, so use TailMatches */
/* Complete GRANT/REVOKE with a list of roles and privileges */
- else if (pg_strcasecmp(prev_wd, "GRANT") == 0 ||
- pg_strcasecmp(prev_wd, "REVOKE") == 0)
- {
+ else if (TailMatches1("GRANT|REVOKE"))
COMPLETE_WITH_QUERY(Query_for_list_of_roles
" UNION SELECT 'SELECT'"
" UNION SELECT 'INSERT'"
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
/* UNLISTEN */
- else if (pg_strcasecmp(prev_wd, "UNLISTEN") == 0)
+ else if (Matches1("UNLISTEN"))
COMPLETE_WITH_QUERY("SELECT pg_catalog.quote_ident(channel) FROM pg_catalog.pg_listening_channels() AS channel WHERE substring(pg_catalog.quote_ident(channel),1,%d)='%s' UNION SELECT '*'");
- /* UPDATE */
+ /* UPDATE --- can be inside EXPLAIN, RULE, etc */
/* If prev. word is UPDATE suggest a list of tables */
- else if (pg_strcasecmp(prev_wd, "UPDATE") == 0)
+ else if (TailMatches1("UPDATE"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_updatables, NULL);
/* Complete UPDATE <table> with "SET" */
- else if (pg_strcasecmp(prev2_wd, "UPDATE") == 0)
+ else if (TailMatches2("UPDATE", MatchAny))
COMPLETE_WITH_CONST("SET");
-
- /*
- * If the previous word is SET (and it wasn't caught above as the _first_
- * word) the word before it was (hopefully) a table name and we'll now
- * make a list of attributes.
- */
- else if (pg_strcasecmp(prev_wd, "SET") == 0)
+ /* Complete UPDATE <table> SET with list of attributes */
+ else if (TailMatches3("UPDATE", MatchAny, "SET"))
COMPLETE_WITH_ATTR(prev2_wd, "");
-
- /* UPDATE xx SET yy = */
- else if (pg_strcasecmp(prev2_wd, "SET") == 0 &&
- pg_strcasecmp(prev4_wd, "UPDATE") == 0)
+ /* UPDATE <table> SET <attr> = */
+ else if (TailMatches4("UPDATE", MatchAny, "SET", MatchAny))
COMPLETE_WITH_CONST("=");
+#ifndef PGXC
+ /* PGXCTODO: This should be re-enabled once USER MAPPING is supported */
/* USER MAPPING */
- else if ((pg_strcasecmp(prev3_wd, "ALTER") == 0 ||
- pg_strcasecmp(prev3_wd, "CREATE") == 0 ||
- pg_strcasecmp(prev3_wd, "DROP") == 0) &&
- pg_strcasecmp(prev2_wd, "USER") == 0 &&
- pg_strcasecmp(prev_wd, "MAPPING") == 0)
+ else if (Matches3("ALTER|CREATE|DROP", "USER", "MAPPING"))
COMPLETE_WITH_CONST("FOR");
- else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev3_wd, "USER") == 0 &&
- pg_strcasecmp(prev2_wd, "MAPPING") == 0 &&
- pg_strcasecmp(prev_wd, "FOR") == 0)
+ else if (Matches4("CREATE", "USER", "MAPPING", "FOR"))
COMPLETE_WITH_QUERY(Query_for_list_of_roles
" UNION SELECT 'CURRENT_USER'"
" UNION SELECT 'PUBLIC'"
# their *.o siblings as well, which do have proper dependencies. It's
# a hack that might fail someday if there is a *_srv.o without a
# corresponding *.o, but it works for now.
+ifeq ($(genmsgids), yes)
+PGXL_MSG_FILEID := 1
+%_srv.o: %.c %.o
+	$(CC) $(CFLAGS) -DPGXL_MSG_MODULE=$(PGXL_MSG_MODULE) -DPGXL_MSG_FILEID=$(PGXL_MSG_FILEID) $(subst -DFRONTEND ,, $(CPPFLAGS)) -c $< -o $@
+ $(eval PGXL_MSG_FILEID := $(shell expr $(PGXL_MSG_FILEID) + 1))
+else
%_srv.o: %.c %.o
- $(CC) $(CFLAGS) $(subst -DFRONTEND,, $(CPPFLAGS)) -c $< -o $@
+ $(CC) $(CFLAGS) $(subst -DFRONTEND ,, $(CPPFLAGS)) -c $< -o $@
+endif
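# With genmsgids=yes, every *_srv.o compile gets a distinct identity:
# PGXL_MSG_MODULE is supplied by the including makefile, and PGXL_MSG_FILEID
# starts at 1 and is bumped by the $(eval ... $(shell expr ...)) line after
# each recipe expansion, so the N-th server object is built with FILEID = N.
# These two defines let Postgres-XL's message-ID machinery attribute an
# elog/ereport call site to a stable (module, file) pair.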
$(OBJS_SRV): | submake-errcodes
all: pg_config.h pg_config_ext.h pg_config_os.h
- # Subdirectories containing headers for server-side development
- SUBDIRS = access bootstrap catalog commands common datatype executor foreign \
+ # Subdirectories containing installable headers
+ SUBDIRS = access bootstrap catalog commands common datatype \
+ executor fe_utils foreign \
- lib libpq mb nodes optimizer parser postmaster regex replication \
+ lib libpq mb nodes optimizer parser pgxc postmaster regex replication \
rewrite storage tcop snowball snowball/libstemmer tsearch \
tsearch/dicts utils port port/atomics port/win32 port/win32_msvc \
port/win32_msvc/sys port/win32/arpa port/win32/netinet \
* POSTGRES heap tuple definitions.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/access/htup.h
PG_RMGR(RM_BRIN_ID, "BRIN", brin_redo, brin_desc, brin_identify, NULL, NULL)
PG_RMGR(RM_COMMIT_TS_ID, "CommitTs", commit_ts_redo, commit_ts_desc, commit_ts_identify, NULL, NULL)
PG_RMGR(RM_REPLORIGIN_ID, "ReplicationOrigin", replorigin_redo, replorigin_desc, replorigin_identify, NULL, NULL)
+#ifdef PGXC
+PG_RMGR(RM_BARRIER_ID, "Barrier", barrier_redo, barrier_desc, NULL, NULL, NULL)
+#endif
+ PG_RMGR(RM_GENERIC_ID, "Generic", generic_redo, generic_desc, generic_identify, NULL, NULL)
+ PG_RMGR(RM_LOGICALMSG_ID, "LogicalMessage", logicalmsg_redo, logicalmsg_desc, logicalmsg_identify, NULL, NULL)
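/*
 * Note that RM_BARRIER_ID is added in the middle of the list: rmgr IDs are
 * assigned from the position of each PG_RMGR() entry, so under #ifdef PGXC
 * the Generic and LogicalMessage rmgrs shift to different numeric IDs than
 * in a vanilla build, and WAL containing Barrier records can only be
 * replayed by a PGXC/XL binary.
 */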
* postgres transaction access method support code
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/access/transam.h
*
* postgres transaction system definitions
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/access/xact.h
*
* include file for the bootstrapping code
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/bootstrap/bootstrap.h
*
* prototypes for functions in backend/catalog/catalog.c
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/catalog/catalog.h
* Routines to support inter-object dependencies.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/catalog/dependency.h
*
* prototypes for functions in backend/catalog/heap.c
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/catalog/heap.h
*
* on system catalogs
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/catalog/indexing.h
*
* prototypes for functions in backend/catalog/namespace.c
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/catalog/namespace.h
extern bool isOtherTempNamespace(Oid namespaceId);
extern int GetTempNamespaceBackendId(Oid namespaceId);
extern Oid GetTempToastNamespace(void);
+ extern void GetTempNamespaceState(Oid *tempNamespaceId,
+ Oid *tempToastNamespaceId);
+ extern void SetTempNamespaceState(Oid tempNamespaceId,
+ Oid tempToastNamespaceId);
extern void ResetTempTableNamespace(void);
+#ifdef XCP
+extern void ForgetTempTableNamespace(void);
+#endif
extern OverrideSearchPath *GetOverrideSearchPath(MemoryContext context);
extern OverrideSearchPath *CopyOverrideSearchPath(OverrideSearchPath *path);
* along with the relation's initial contents.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/catalog/pg_aggregate.h
* aggkind aggregate kind, see AGGKIND_ categories below
* aggnumdirectargs number of arguments that are "direct" arguments
* aggtransfn transition function
+#ifdef PGXC
+ *	aggcollectfn		collection function
+#endif
* aggfinalfn final function (0 if none)
+ * aggcombinefn combine function (0 if none)
+ * aggserialfn function to convert transtype to bytea (0 if none)
+ * aggdeserialfn function to convert bytea to transtype (0 if none)
* aggmtransfn forward function for moving-aggregate mode (0 if none)
* aggminvtransfn inverse function for moving-aggregate mode (0 if none)
* aggmfinalfn final function for moving-aggregate mode (0 if none)
char aggkind;
int16 aggnumdirectargs;
regproc aggtransfn;
+ regproc aggcollectfn; /* PGXC */
regproc aggfinalfn;
+ regproc aggcombinefn;
+ regproc aggserialfn;
+ regproc aggdeserialfn;
regproc aggmtransfn;
regproc aggminvtransfn;
regproc aggmfinalfn;
#define Anum_pg_aggregate_aggkind 2
#define Anum_pg_aggregate_aggnumdirectargs 3
#define Anum_pg_aggregate_aggtransfn 4
-#define Anum_pg_aggregate_aggfinalfn 5
-#define Anum_pg_aggregate_aggcombinefn 6
-#define Anum_pg_aggregate_aggserialfn 7
-#define Anum_pg_aggregate_aggdeserialfn 8
-#define Anum_pg_aggregate_aggmtransfn 9
-#define Anum_pg_aggregate_aggminvtransfn 10
-#define Anum_pg_aggregate_aggmfinalfn 11
-#define Anum_pg_aggregate_aggfinalextra 12
-#define Anum_pg_aggregate_aggmfinalextra 13
-#define Anum_pg_aggregate_aggsortop 14
-#define Anum_pg_aggregate_aggtranstype 15
-#define Anum_pg_aggregate_aggtransspace 16
-#define Anum_pg_aggregate_aggmtranstype 17
-#define Anum_pg_aggregate_aggmtransspace 18
-#define Anum_pg_aggregate_agginitval 19
-#define Anum_pg_aggregate_aggminitval 20
+#define Anum_pg_aggregate_aggcollectfn 5
+#define Anum_pg_aggregate_aggfinalfn 6
- #define Anum_pg_aggregate_aggmtransfn 7
- #define Anum_pg_aggregate_aggminvtransfn 8
- #define Anum_pg_aggregate_aggmfinalfn 9
- #define Anum_pg_aggregate_aggfinalextra 10
- #define Anum_pg_aggregate_aggmfinalextra 11
- #define Anum_pg_aggregate_aggsortop 12
- #define Anum_pg_aggregate_aggtranstype 13
- #define Anum_pg_aggregate_aggcollecttype 14
- #define Anum_pg_aggregate_aggtransspace 15
- #define Anum_pg_aggregate_aggmtranstype 16
- #define Anum_pg_aggregate_aggmtransspace 17
- #define Anum_pg_aggregate_agginitval 18
- #define Anum_pg_aggregate_agginitcollect 19
- #define Anum_pg_aggregate_aggminitval 20
++#define Anum_pg_aggregate_aggcombinefn 7
++#define Anum_pg_aggregate_aggserialfn 8
++#define Anum_pg_aggregate_aggdeserialfn 9
++#define Anum_pg_aggregate_aggmtransfn 10
++#define Anum_pg_aggregate_aggminvtransfn 11
++#define Anum_pg_aggregate_aggmfinalfn 12
++#define Anum_pg_aggregate_aggfinalextra 13
++#define Anum_pg_aggregate_aggmfinalextra 14
++#define Anum_pg_aggregate_aggsortop 15
++#define Anum_pg_aggregate_aggtranstype 16
++#define Anum_pg_aggregate_aggcollecttype 17
++#define Anum_pg_aggregate_aggtransspace 18
++#define Anum_pg_aggregate_aggmtranstype 19
++#define Anum_pg_aggregate_aggmtransspace 20
++#define Anum_pg_aggregate_agginitval 21
++#define Anum_pg_aggregate_agginitcollect 22
++#define Anum_pg_aggregate_aggminitval 23
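/*
 * The wholesale renumbering above follows from inserting aggcollectfn at
 * attribute 5 and aggcollecttype/agginitcollect further down: the Anum_
 * constants must mirror the physical field order of FormData_pg_aggregate,
 * and every DATA(insert ...) row below must list its values in exactly that
 * order -- hence each merged row carries three more columns than vanilla 9.6.
 */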
/*
* Symbolic values for aggkind column. We distinguish normal aggregates
*/
/* avg */
- DATA(insert ( 2100 n 0 int8_avg_accum numeric_poly_collect numeric_poly_avg int8_avg_accum int8_avg_accum_inv numeric_poly_avg f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2101 n 0 int4_avg_accum int8_avg_collect int8_avg int4_avg_accum int4_avg_accum_inv int8_avg f f 0 1016 1016 0 1016 0 "{0,0}" "{0,0}" "{0,0}" ));
- DATA(insert ( 2102 n 0 int2_avg_accum int8_avg_collect int8_avg int2_avg_accum int2_avg_accum_inv int8_avg f f 0 1016 1016 0 1016 0 "{0,0}" "{0,0}" "{0,0}" ));
- DATA(insert ( 2103 n 0 numeric_avg_accum numeric_collect numeric_avg numeric_avg_accum numeric_accum_inv numeric_avg f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
- DATA(insert ( 2104 n 0 float4_accum float8_collect float8_avg - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2105 n 0 float8_accum float8_collect float8_avg - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2106 n 0 interval_accum interval_collect interval_avg interval_accum interval_accum_inv interval_avg f f 0 1187 1187 0 1187 0 "{0 second,0 second}" "{0 second,0 second}" "{0 second,0 second}" ));
-DATA(insert ( 2100 n 0 int8_avg_accum numeric_poly_avg int8_avg_combine int8_avg_serialize int8_avg_deserialize int8_avg_accum int8_avg_accum_inv numeric_poly_avg f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2101 n 0 int4_avg_accum int8_avg int4_avg_combine - - int4_avg_accum int4_avg_accum_inv int8_avg f f 0 1016 0 1016 0 "{0,0}" "{0,0}" ));
-DATA(insert ( 2102 n 0 int2_avg_accum int8_avg int4_avg_combine - - int2_avg_accum int2_avg_accum_inv int8_avg f f 0 1016 0 1016 0 "{0,0}" "{0,0}" ));
-DATA(insert ( 2103 n 0 numeric_avg_accum numeric_avg numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_avg f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2104 n 0 float4_accum float8_avg float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2105 n 0 float8_accum float8_avg float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2106 n 0 interval_accum interval_avg interval_combine - - interval_accum interval_accum_inv interval_avg f f 0 1187 0 1187 0 "{0 second,0 second}" "{0 second,0 second}" ));
++DATA(insert ( 2100 n 0 int8_avg_accum numeric_poly_collect numeric_poly_avg int8_avg_combine int8_avg_serialize int8_avg_deserialize int8_avg_accum int8_avg_accum_inv numeric_poly_avg f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2101 n 0 int4_avg_accum int8_avg_collect int8_avg int4_avg_combine - - int4_avg_accum int4_avg_accum_inv int8_avg f f 0 1016 1016 0 1016 0 "{0,0}" "{0,0}" "{0,0}" ));
++DATA(insert ( 2102 n 0 int2_avg_accum int8_avg_collect int8_avg int4_avg_combine - - int2_avg_accum int2_avg_accum_inv int8_avg f f 0 1016 1016 0 1016 0 "{0,0}" "{0,0}" "{0,0}" ));
++DATA(insert ( 2103 n 0 numeric_avg_accum numeric_collect numeric_avg numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_avg f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
++DATA(insert ( 2104 n 0 float4_accum float8_collect float8_avg float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2105 n 0 float8_accum float8_collect float8_avg float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2106 n 0 interval_accum interval_collect interval_avg interval_combine - - interval_accum interval_accum_inv interval_avg f f 0 1187 1187 0 1187 0 "{0 second,0 second}" "{0 second,0 second}" "{0 second,0 second}" ));
/* sum */
- DATA(insert ( 2107 n 0 int8_avg_accum numeric_poly_collect numeric_poly_sum int8_avg_accum int8_avg_accum_inv numeric_poly_sum f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2108 n 0 int4_sum int8_sum_to_int8 - int4_avg_accum int4_avg_accum_inv int2int4_sum f f 0 20 20 0 1016 0 _null_ _null_ "{0,0}" ));
- DATA(insert ( 2109 n 0 int2_sum int8_sum_to_int8 - int2_avg_accum int2_avg_accum_inv int2int4_sum f f 0 20 20 0 1016 0 _null_ _null_ "{0,0}" ));
- DATA(insert ( 2110 n 0 float4pl float4pl - - - - f f 0 700 700 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2111 n 0 float8pl float8pl - - - - f f 0 701 701 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2112 n 0 cash_pl cash_pl - cash_pl cash_mi - f f 0 790 790 0 790 0 _null_ _null_ _null_ ));
- DATA(insert ( 2113 n 0 interval_pl interval_pl - interval_pl interval_mi - f f 0 1186 1186 0 1186 0 _null_ _null_ _null_ ));
- DATA(insert ( 2114 n 0 numeric_avg_accum numeric_collect numeric_sum numeric_avg_accum numeric_accum_inv numeric_sum f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
-DATA(insert ( 2107 n 0 int8_avg_accum numeric_poly_sum int8_avg_combine int8_avg_serialize int8_avg_deserialize int8_avg_accum int8_avg_accum_inv numeric_poly_sum f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2108 n 0 int4_sum - int8pl - - int4_avg_accum int4_avg_accum_inv int2int4_sum f f 0 20 0 1016 0 _null_ "{0,0}" ));
-DATA(insert ( 2109 n 0 int2_sum - int8pl - - int2_avg_accum int2_avg_accum_inv int2int4_sum f f 0 20 0 1016 0 _null_ "{0,0}" ));
-DATA(insert ( 2110 n 0 float4pl - float4pl - - - - - f f 0 700 0 0 0 _null_ _null_ ));
-DATA(insert ( 2111 n 0 float8pl - float8pl - - - - - f f 0 701 0 0 0 _null_ _null_ ));
-DATA(insert ( 2112 n 0 cash_pl - cash_pl - - cash_pl cash_mi - f f 0 790 0 790 0 _null_ _null_ ));
-DATA(insert ( 2113 n 0 interval_pl - interval_pl - - interval_pl interval_mi - f f 0 1186 0 1186 0 _null_ _null_ ));
-DATA(insert ( 2114 n 0 numeric_avg_accum numeric_sum numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_sum f f 0 2281 128 2281 128 _null_ _null_ ));
++DATA(insert ( 2107 n 0 int8_avg_accum numeric_poly_collect numeric_poly_sum int8_avg_combine int8_avg_serialize int8_avg_deserialize int8_avg_accum int8_avg_accum_inv numeric_poly_sum f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2108 n 0 int4_sum int8_sum_to_int8 - int8pl - - int4_avg_accum int4_avg_accum_inv int2int4_sum f f 0 20 20 0 1016 0 _null_ _null_ "{0,0}" ));
++DATA(insert ( 2109 n 0 int2_sum int8_sum_to_int8 - int8pl - - int2_avg_accum int2_avg_accum_inv int2int4_sum f f 0 20 20 0 1016 0 _null_ _null_ "{0,0}" ));
++DATA(insert ( 2110 n 0 float4pl float4pl - float4pl - - - - - f f 0 700 700 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2111 n 0 float8pl float8pl - float8pl - - - - - f f 0 701 701 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2112 n 0 cash_pl cash_pl - cash_pl - - cash_pl cash_mi - f f 0 790 790 0 790 0 _null_ _null_ _null_ ));
++DATA(insert ( 2113 n 0 interval_pl interval_pl - interval_pl - - interval_pl interval_mi - f f 0 1186 1186 0 1186 0 _null_ _null_ _null_ ));
++DATA(insert ( 2114 n 0 numeric_avg_accum numeric_collect numeric_sum numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_sum f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
/* max */
- DATA(insert ( 2115 n 0 int8larger int8larger - - - - f f 413 20 20 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2116 n 0 int4larger int4larger - - - - f f 521 23 23 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2117 n 0 int2larger int2larger - - - - f f 520 21 21 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2118 n 0 oidlarger oidlarger - - - - f f 610 26 26 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2119 n 0 float4larger float4larger - - - - f f 623 700 700 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2120 n 0 float8larger float8larger - - - - f f 674 701 701 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2121 n 0 int4larger int4larger - - - - f f 563 702 702 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2122 n 0 date_larger date_larger - - - - f f 1097 1082 1082 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2123 n 0 time_larger time_larger - - - - f f 1112 1083 1083 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2124 n 0 timetz_larger timetz_larger - - - - f f 1554 1266 1266 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2125 n 0 cashlarger cashlarger - - - - f f 903 790 790 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2126 n 0 timestamp_larger timestamp_larger - - - - f f 2064 1114 1114 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2127 n 0 timestamptz_larger timestamptz_larger - - - - f f 1324 1184 1184 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2128 n 0 interval_larger interval_larger - - - - f f 1334 1186 1186 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2129 n 0 text_larger text_larger - - - - f f 666 25 25 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2130 n 0 numeric_larger numeric_larger - - - - f f 1756 1700 1700 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2050 n 0 array_larger array_larger - - - - f f 1073 2277 2277 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2244 n 0 bpchar_larger bpchar_larger - - - - f f 1060 1042 1042 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2797 n 0 tidlarger tidlarger - - - - f f 2800 27 27 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3526 n 0 enum_larger enum_larger - - - - f f 3519 3500 3500 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3564 n 0 network_larger network_larger - - - - f f 1205 869 869 0 0 0 _null_ _null_ _null_ ));
-DATA(insert ( 2115 n 0 int8larger - int8larger - - - - - f f 413 20 0 0 0 _null_ _null_ ));
-DATA(insert ( 2116 n 0 int4larger - int4larger - - - - - f f 521 23 0 0 0 _null_ _null_ ));
-DATA(insert ( 2117 n 0 int2larger - int2larger - - - - - f f 520 21 0 0 0 _null_ _null_ ));
-DATA(insert ( 2118 n 0 oidlarger - oidlarger - - - - - f f 610 26 0 0 0 _null_ _null_ ));
-DATA(insert ( 2119 n 0 float4larger - float4larger - - - - - f f 623 700 0 0 0 _null_ _null_ ));
-DATA(insert ( 2120 n 0 float8larger - float8larger - - - - - f f 674 701 0 0 0 _null_ _null_ ));
-DATA(insert ( 2121 n 0 int4larger - int4larger - - - - - f f 563 702 0 0 0 _null_ _null_ ));
-DATA(insert ( 2122 n 0 date_larger - date_larger - - - - - f f 1097 1082 0 0 0 _null_ _null_ ));
-DATA(insert ( 2123 n 0 time_larger - time_larger - - - - - f f 1112 1083 0 0 0 _null_ _null_ ));
-DATA(insert ( 2124 n 0 timetz_larger - timetz_larger - - - - - f f 1554 1266 0 0 0 _null_ _null_ ));
-DATA(insert ( 2125 n 0 cashlarger - cashlarger - - - - - f f 903 790 0 0 0 _null_ _null_ ));
-DATA(insert ( 2126 n 0 timestamp_larger - timestamp_larger - - - - - f f 2064 1114 0 0 0 _null_ _null_ ));
-DATA(insert ( 2127 n 0 timestamptz_larger - timestamptz_larger - - - - - f f 1324 1184 0 0 0 _null_ _null_ ));
-DATA(insert ( 2128 n 0 interval_larger - interval_larger - - - - - f f 1334 1186 0 0 0 _null_ _null_ ));
-DATA(insert ( 2129 n 0 text_larger - text_larger - - - - - f f 666 25 0 0 0 _null_ _null_ ));
-DATA(insert ( 2130 n 0 numeric_larger - numeric_larger - - - - - f f 1756 1700 0 0 0 _null_ _null_ ));
-DATA(insert ( 2050 n 0 array_larger - array_larger - - - - - f f 1073 2277 0 0 0 _null_ _null_ ));
-DATA(insert ( 2244 n 0 bpchar_larger - bpchar_larger - - - - - f f 1060 1042 0 0 0 _null_ _null_ ));
-DATA(insert ( 2797 n 0 tidlarger - tidlarger - - - - - f f 2800 27 0 0 0 _null_ _null_ ));
-DATA(insert ( 3526 n 0 enum_larger - enum_larger - - - - - f f 3519 3500 0 0 0 _null_ _null_ ));
-DATA(insert ( 3564 n 0 network_larger - network_larger - - - - - f f 1205 869 0 0 0 _null_ _null_ ));
++DATA(insert ( 2115 n 0 int8larger int8larger - int8larger - - - - - f f 413 20 20 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2116 n 0 int4larger int4larger - int4larger - - - - - f f 521 23 23 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2117 n 0 int2larger int2larger - int2larger - - - - - f f 520 21 21 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2118 n 0 oidlarger oidlarger - oidlarger - - - - - f f 610 26 26 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2119 n 0 float4larger float4larger - float4larger - - - - - f f 623 700 700 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2120 n 0 float8larger float8larger - float8larger - - - - - f f 674 701 701 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2121 n 0 int4larger int4larger - int4larger - - - - - f f 563 702 702 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2122 n 0 date_larger date_larger - date_larger - - - - - f f 1097 1082 1082 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2123 n 0 time_larger time_larger - time_larger - - - - - f f 1112 1083 1083 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2124 n 0 timetz_larger timetz_larger - timetz_larger - - - - - f f 1554 1266 1266 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2125 n 0 cashlarger cashlarger - cashlarger - - - - - f f 903 790 790 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2126 n 0 timestamp_larger timestamp_larger - timestamp_larger - - - - - f f 2064 1114 1114 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2127 n 0 timestamptz_larger timestamptz_larger - timestamptz_larger - - - - - f f 1324 1184 1184 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2128 n 0 interval_larger interval_larger - interval_larger - - - - - f f 1334 1186 1186 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2129 n 0 text_larger text_larger - text_larger - - - - - f f 666 25 25 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2130 n 0 numeric_larger numeric_larger - numeric_larger - - - - - f f 1756 1700 1700 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2050 n 0 array_larger array_larger - array_larger - - - - - f f 1073 2277 2277 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2244 n 0 bpchar_larger bpchar_larger - bpchar_larger - - - - - f f 1060 1042 1042 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2797 n 0 tidlarger tidlarger - tidlarger - - - - - f f 2800 27 27 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3526 n 0 enum_larger enum_larger - enum_larger - - - - - f f 3519 3500 3500 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3564 n 0 network_larger network_larger - network_larger - - - - - f f 1205 869 869 0 0 0 _null_ _null_ _null_ ));
/* min */
- DATA(insert ( 2131 n 0 int8smaller int8smaller - - - - f f 412 20 20 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2132 n 0 int4smaller int4smaller - - - - f f 97 23 23 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2133 n 0 int2smaller int2smaller - - - - f f 95 21 21 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2134 n 0 oidsmaller oidsmaller - - - - f f 609 26 26 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2135 n 0 float4smaller float4smaller - - - - f f 622 700 700 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2136 n 0 float8smaller float8smaller - - - - f f 672 701 701 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2137 n 0 int4smaller int4smaller - - - - f f 562 702 702 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2138 n 0 date_smaller date_smaller - - - - f f 1095 1082 1082 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2139 n 0 time_smaller time_smaller - - - - f f 1110 1083 1083 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2140 n 0 timetz_smaller timetz_smaller - - - - f f 1552 1266 1266 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2141 n 0 cashsmaller cashsmaller - - - - f f 902 790 790 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2142 n 0 timestamp_smaller timestamp_smaller - - - - f f 2062 1114 1114 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2143 n 0 timestamptz_smaller timestamptz_smaller - - - - f f 1322 1184 1184 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2144 n 0 interval_smaller interval_smaller - - - - f f 1332 1186 1186 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2145 n 0 text_smaller text_smaller - - - - f f 664 25 25 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2146 n 0 numeric_smaller numeric_smaller - - - - f f 1754 1700 1700 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2051 n 0 array_smaller array_smaller - - - - f f 1072 2277 2277 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2245 n 0 bpchar_smaller bpchar_smaller - - - - f f 1058 1042 1042 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2798 n 0 tidsmaller tidsmaller - - - - f f 2799 27 27 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3527 n 0 enum_smaller enum_smaller - - - - f f 3518 3500 3500 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3565 n 0 network_smaller network_smaller - - - - f f 1203 869 869 0 0 0 _null_ _null_ _null_ ));
-DATA(insert ( 2131 n 0 int8smaller - int8smaller - - - - - f f 412 20 0 0 0 _null_ _null_ ));
-DATA(insert ( 2132 n 0 int4smaller - int4smaller - - - - - f f 97 23 0 0 0 _null_ _null_ ));
-DATA(insert ( 2133 n 0 int2smaller - int2smaller - - - - - f f 95 21 0 0 0 _null_ _null_ ));
-DATA(insert ( 2134 n 0 oidsmaller - oidsmaller - - - - - f f 609 26 0 0 0 _null_ _null_ ));
-DATA(insert ( 2135 n 0 float4smaller - float4smaller - - - - - f f 622 700 0 0 0 _null_ _null_ ));
-DATA(insert ( 2136 n 0 float8smaller - float8smaller - - - - - f f 672 701 0 0 0 _null_ _null_ ));
-DATA(insert ( 2137 n 0 int4smaller - int4smaller - - - - - f f 562 702 0 0 0 _null_ _null_ ));
-DATA(insert ( 2138 n 0 date_smaller - date_smaller - - - - - f f 1095 1082 0 0 0 _null_ _null_ ));
-DATA(insert ( 2139 n 0 time_smaller - time_smaller - - - - - f f 1110 1083 0 0 0 _null_ _null_ ));
-DATA(insert ( 2140 n 0 timetz_smaller - timetz_smaller - - - - - f f 1552 1266 0 0 0 _null_ _null_ ));
-DATA(insert ( 2141 n 0 cashsmaller - cashsmaller - - - - - f f 902 790 0 0 0 _null_ _null_ ));
-DATA(insert ( 2142 n 0 timestamp_smaller - timestamp_smaller - - - - - f f 2062 1114 0 0 0 _null_ _null_ ));
-DATA(insert ( 2143 n 0 timestamptz_smaller - timestamptz_smaller - - - - - f f 1322 1184 0 0 0 _null_ _null_ ));
-DATA(insert ( 2144 n 0 interval_smaller - interval_smaller - - - - - f f 1332 1186 0 0 0 _null_ _null_ ));
-DATA(insert ( 2145 n 0 text_smaller - text_smaller - - - - - f f 664 25 0 0 0 _null_ _null_ ));
-DATA(insert ( 2146 n 0 numeric_smaller - numeric_smaller - - - - - f f 1754 1700 0 0 0 _null_ _null_ ));
-DATA(insert ( 2051 n 0 array_smaller - array_smaller - - - - - f f 1072 2277 0 0 0 _null_ _null_ ));
-DATA(insert ( 2245 n 0 bpchar_smaller - bpchar_smaller - - - - - f f 1058 1042 0 0 0 _null_ _null_ ));
-DATA(insert ( 2798 n 0 tidsmaller - tidsmaller - - - - - f f 2799 27 0 0 0 _null_ _null_ ));
-DATA(insert ( 3527 n 0 enum_smaller - enum_smaller - - - - - f f 3518 3500 0 0 0 _null_ _null_ ));
-DATA(insert ( 3565 n 0 network_smaller - network_smaller - - - - - f f 1203 869 0 0 0 _null_ _null_ ));
++DATA(insert ( 2131 n 0 int8smaller int8smaller - int8smaller - - - - - f f 412 20 20 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2132 n 0 int4smaller int4smaller - int4smaller - - - - - f f 97 23 23 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2133 n 0 int2smaller int2smaller - int2smaller - - - - - f f 95 21 21 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2134 n 0 oidsmaller oidsmaller - oidsmaller - - - - - f f 609 26 26 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2135 n 0 float4smaller float4smaller - float4smaller - - - - - f f 622 700 700 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2136 n 0 float8smaller float8smaller - float8smaller - - - - - f f 672 701 701 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2137 n 0 int4smaller int4smaller - int4smaller - - - - - f f 562 702 702 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2138 n 0 date_smaller date_smaller - date_smaller - - - - - f f 1095 1082 1082 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2139 n 0 time_smaller time_smaller - time_smaller - - - - - f f 1110 1083 1083 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2140 n 0 timetz_smaller timetz_smaller - timetz_smaller - - - - - f f 1552 1266 1266 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2141 n 0 cashsmaller cashsmaller - cashsmaller - - - - - f f 902 790 790 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2142 n 0 timestamp_smaller timestamp_smaller - timestamp_smaller - - - - - f f 2062 1114 1114 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2143 n 0 timestamptz_smaller timestamptz_smaller - timestamptz_smaller - - - - - f f 1322 1184 1184 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2144 n 0 interval_smaller interval_smaller - interval_smaller - - - - - f f 1332 1186 1186 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2145 n 0 text_smaller text_smaller - text_smaller - - - - - f f 664 25 25 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2146 n 0 numeric_smaller numeric_smaller - numeric_smaller - - - - - f f 1754 1700 1700 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2051 n 0 array_smaller array_smaller - array_smaller - - - - - f f 1072 2277 2277 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2245 n 0 bpchar_smaller bpchar_smaller - bpchar_smaller - - - - - f f 1058 1042 1042 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2798 n 0 tidsmaller tidsmaller - tidsmaller - - - - - f f 2799 27 27 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3527 n 0 enum_smaller enum_smaller - enum_smaller - - - - - f f 3518 3500 3500 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3565 n 0 network_smaller network_smaller - network_smaller - - - - - f f 1203 869 869 0 0 0 _null_ _null_ _null_ ));
/* count */
- DATA(insert ( 2147 n 0 int8inc_any int8_sum_to_int8 - int8inc_any int8dec_any - f f 0 20 20 0 20 0 "0" _null_ "0" ));
- DATA(insert ( 2803 n 0 int8inc int8_sum_to_int8 - int8inc int8dec - f f 0 20 20 0 20 0 "0" _null_ "0" ));
-DATA(insert ( 2147 n 0 int8inc_any - int8pl - - int8inc_any int8dec_any - f f 0 20 0 20 0 "0" "0" ));
-DATA(insert ( 2803 n 0 int8inc - int8pl - - int8inc int8dec - f f 0 20 0 20 0 "0" "0" ));
++DATA(insert ( 2147 n 0 int8inc_any int8_sum_to_int8 - int8pl - - int8inc_any int8dec_any - f f 0 20 20 0 20 0 "0" _null_ "0" ));
++DATA(insert ( 2803 n 0 int8inc int8_sum_to_int8 - int8pl - - int8inc int8dec - f f 0 20 20 0 20 0 "0" _null_ "0" ));
/* var_pop */
- DATA(insert ( 2718 n 0 int8_accum numeric_collect numeric_var_pop int8_accum int8_accum_inv numeric_var_pop f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
- DATA(insert ( 2719 n 0 int4_accum numeric_poly_collect numeric_poly_var_pop int4_accum int4_accum_inv numeric_poly_var_pop f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2720 n 0 int2_accum numeric_poly_collect numeric_poly_var_pop int2_accum int2_accum_inv numeric_poly_var_pop f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2721 n 0 float4_accum float8_collect float8_var_pop - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2722 n 0 float8_accum float8_collect float8_var_pop - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2723 n 0 numeric_accum numeric_collect numeric_var_pop numeric_accum numeric_accum_inv numeric_var_pop f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
-DATA(insert ( 2718 n 0 int8_accum numeric_var_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_var_pop f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2719 n 0 int4_accum numeric_poly_var_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_var_pop f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2720 n 0 int2_accum numeric_poly_var_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_var_pop f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2721 n 0 float4_accum float8_var_pop float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2722 n 0 float8_accum float8_var_pop float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2723 n 0 numeric_accum numeric_var_pop numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_pop f f 0 2281 128 2281 128 _null_ _null_ ));
++DATA(insert ( 2718 n 0 int8_accum numeric_collect numeric_var_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_var_pop f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
++DATA(insert ( 2719 n 0 int4_accum numeric_poly_collect numeric_poly_var_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_var_pop f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2720 n 0 int2_accum numeric_poly_collect numeric_poly_var_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_var_pop f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2721 n 0 float4_accum float8_collect float8_var_pop float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2722 n 0 float8_accum float8_collect float8_var_pop float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2723 n 0 numeric_accum numeric_collect numeric_var_pop numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_pop f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
/* var_samp */
- DATA(insert ( 2641 n 0 int8_accum numeric_collect numeric_var_samp int8_accum int8_accum_inv numeric_var_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
- DATA(insert ( 2642 n 0 int4_accum numeric_poly_collect numeric_poly_var_samp int4_accum int4_accum_inv numeric_poly_var_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2643 n 0 int2_accum numeric_poly_collect numeric_poly_var_samp int2_accum int2_accum_inv numeric_poly_var_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2644 n 0 float4_accum float8_collect float8_var_samp - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2645 n 0 float8_accum float8_collect float8_var_samp - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2646 n 0 numeric_accum numeric_collect numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
-DATA(insert ( 2641 n 0 int8_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2642 n 0 int4_accum numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2643 n 0 int2_accum numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2644 n 0 float4_accum float8_var_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2645 n 0 float8_accum float8_var_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2646 n 0 numeric_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ ));
++DATA(insert ( 2641 n 0 int8_accum numeric_collect numeric_var_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_var_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
++DATA(insert ( 2642 n 0 int4_accum numeric_poly_collect numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_var_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2643 n 0 int2_accum numeric_poly_collect numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_var_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2644 n 0 float4_accum float8_collect float8_var_samp float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2645 n 0 float8_accum float8_collect float8_var_samp float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2646 n 0 numeric_accum numeric_collect numeric_var_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
/* variance: historical Postgres syntax for var_samp */
- DATA(insert ( 2148 n 0 int8_accum numeric_collect numeric_var_samp int8_accum int8_accum_inv numeric_var_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
- DATA(insert ( 2149 n 0 int4_accum numeric_poly_collect numeric_poly_var_samp int4_accum int4_accum_inv numeric_poly_var_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2150 n 0 int2_accum numeric_poly_collect numeric_poly_var_samp int2_accum int2_accum_inv numeric_poly_var_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2151 n 0 float4_accum float8_collect float8_var_samp - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2152 n 0 float8_accum float8_collect float8_var_samp - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2153 n 0 numeric_accum numeric_collect numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
-DATA(insert ( 2148 n 0 int8_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2149 n 0 int4_accum numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2150 n 0 int2_accum numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2151 n 0 float4_accum float8_var_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2152 n 0 float8_accum float8_var_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2153 n 0 numeric_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ ));
++DATA(insert ( 2148 n 0 int8_accum numeric_collect numeric_var_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_var_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
++DATA(insert ( 2149 n 0 int4_accum numeric_poly_collect numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_var_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2150 n 0 int2_accum numeric_poly_collect numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_var_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2151 n 0 float4_accum float8_collect float8_var_samp float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2152 n 0 float8_accum float8_collect float8_var_samp float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2153 n 0 numeric_accum numeric_collect numeric_var_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
/* stddev_pop */
- DATA(insert ( 2724 n 0 int8_accum numeric_collect numeric_stddev_pop int8_accum int8_accum_inv numeric_stddev_pop f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
- DATA(insert ( 2725 n 0 int4_accum numeric_poly_collect numeric_poly_stddev_pop int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2726 n 0 int2_accum numeric_poly_collect numeric_poly_stddev_pop int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2727 n 0 float4_accum float8_collect float8_stddev_pop - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2728 n 0 float8_accum float8_collect float8_stddev_pop - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2729 n 0 numeric_accum numeric_collect numeric_stddev_pop numeric_accum numeric_accum_inv numeric_stddev_pop f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
-DATA(insert ( 2724 n 0 int8_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_pop f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2725 n 0 int4_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2726 n 0 int2_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2727 n 0 float4_accum float8_stddev_pop float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2728 n 0 float8_accum float8_stddev_pop float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2729 n 0 numeric_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_pop f f 0 2281 128 2281 128 _null_ _null_ ));
++DATA(insert ( 2724 n 0 int8_accum numeric_collect numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_pop f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
++DATA(insert ( 2725 n 0 int4_accum numeric_poly_collect numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2726 n 0 int2_accum numeric_poly_collect numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2727 n 0 float4_accum float8_collect float8_stddev_pop float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2728 n 0 float8_accum float8_collect float8_stddev_pop float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2729 n 0 numeric_accum numeric_collect numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_pop f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
/* stddev_samp */
- DATA(insert ( 2712 n 0 int8_accum numeric_collect numeric_stddev_samp int8_accum int8_accum_inv numeric_stddev_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
- DATA(insert ( 2713 n 0 int4_accum numeric_poly_collect numeric_poly_stddev_samp int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2714 n 0 int2_accum numeric_poly_collect numeric_poly_stddev_samp int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2715 n 0 float4_accum float8_collect float8_stddev_samp - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2716 n 0 float8_accum float8_collect float8_stddev_samp - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2717 n 0 numeric_accum numeric_collect numeric_stddev_samp numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
-DATA(insert ( 2712 n 0 int8_accum numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2713 n 0 int4_accum numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2714 n 0 int2_accum numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2715 n 0 float4_accum float8_stddev_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2716 n 0 float8_accum float8_stddev_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2717 n 0 numeric_accum numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
++DATA(insert ( 2712 n 0 int8_accum numeric_collect numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
++DATA(insert ( 2713 n 0 int4_accum numeric_poly_collect numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2714 n 0 int2_accum numeric_poly_collect numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2715 n 0 float4_accum float8_collect float8_stddev_samp float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2716 n 0 float8_accum float8_collect float8_stddev_samp float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2717 n 0 numeric_accum numeric_collect numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
/* stddev: historical Postgres syntax for stddev_samp */
- DATA(insert ( 2154 n 0 int8_accum numeric_collect numeric_stddev_samp int8_accum int8_accum_inv numeric_stddev_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
- DATA(insert ( 2155 n 0 int4_accum numeric_poly_collect numeric_poly_stddev_samp int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2156 n 0 int2_accum numeric_poly_collect numeric_poly_stddev_samp int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
- DATA(insert ( 2157 n 0 float4_accum float8_collect float8_stddev_samp - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2158 n 0 float8_accum float8_collect float8_stddev_samp - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2159 n 0 numeric_accum numeric_collect numeric_stddev_samp numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
-DATA(insert ( 2154 n 0 int8_accum numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2155 n 0 int4_accum numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2156 n 0 int2_accum numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2157 n 0 float4_accum float8_stddev_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2158 n 0 float8_accum float8_stddev_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2159 n 0 numeric_accum numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
++DATA(insert ( 2154 n 0 int8_accum numeric_collect numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
++DATA(insert ( 2155 n 0 int4_accum numeric_poly_collect numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2156 n 0 int2_accum numeric_poly_collect numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 7019 7019 128 7019 128 _null_ _null_ _null_ ));
++DATA(insert ( 2157 n 0 float4_accum float8_collect float8_stddev_samp float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2158 n 0 float8_accum float8_collect float8_stddev_samp float8_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2159 n 0 numeric_accum numeric_collect numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 7018 7018 128 7018 128 _null_ _null_ _null_ ));
/* SQL2003 binary regression aggregates */
- DATA(insert ( 2818 n 0 int8inc_float8_float8 int8_sum_to_int8 - - - - f f 0 20 20 0 0 0 "0" _null_ _null_ ));
- DATA(insert ( 2819 n 0 float8_regr_accum float8_regr_collect float8_regr_sxx - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2820 n 0 float8_regr_accum float8_regr_collect float8_regr_syy - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2821 n 0 float8_regr_accum float8_regr_collect float8_regr_sxy - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2822 n 0 float8_regr_accum float8_regr_collect float8_regr_avgx - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2823 n 0 float8_regr_accum float8_regr_collect float8_regr_avgy - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2824 n 0 float8_regr_accum float8_regr_collect float8_regr_r2 - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2825 n 0 float8_regr_accum float8_regr_collect float8_regr_slope - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2826 n 0 float8_regr_accum float8_regr_collect float8_regr_intercept - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2827 n 0 float8_regr_accum float8_regr_collect float8_covar_pop - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2828 n 0 float8_regr_accum float8_regr_collect float8_covar_samp - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2829 n 0 float8_regr_accum float8_regr_collect float8_corr - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2818 n 0 int8inc_float8_float8 - int8pl - - - - - f f 0 20 0 0 0 "0" _null_ ));
-DATA(insert ( 2819 n 0 float8_regr_accum float8_regr_sxx float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2820 n 0 float8_regr_accum float8_regr_syy float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2821 n 0 float8_regr_accum float8_regr_sxy float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2822 n 0 float8_regr_accum float8_regr_avgx float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2823 n 0 float8_regr_accum float8_regr_avgy float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2824 n 0 float8_regr_accum float8_regr_r2 float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2825 n 0 float8_regr_accum float8_regr_slope float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2826 n 0 float8_regr_accum float8_regr_intercept float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2827 n 0 float8_regr_accum float8_covar_pop float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2828 n 0 float8_regr_accum float8_covar_samp float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2829 n 0 float8_regr_accum float8_corr float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2818 n 0 int8inc_float8_float8 int8_sum_to_int8 - int8pl - - - - - f f 0 20 20 0 0 0 "0" _null_ _null_ ));
++DATA(insert ( 2819 n 0 float8_regr_accum float8_regr_collect float8_regr_sxx float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2820 n 0 float8_regr_accum float8_regr_collect float8_regr_syy float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2821 n 0 float8_regr_accum float8_regr_collect float8_regr_sxy float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2822 n 0 float8_regr_accum float8_regr_collect float8_regr_avgx float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2823 n 0 float8_regr_accum float8_regr_collect float8_regr_avgy float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2824 n 0 float8_regr_accum float8_regr_collect float8_regr_r2 float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2825 n 0 float8_regr_accum float8_regr_collect float8_regr_slope float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2826 n 0 float8_regr_accum float8_regr_collect float8_regr_intercept float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2827 n 0 float8_regr_accum float8_regr_collect float8_covar_pop float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2828 n 0 float8_regr_accum float8_regr_collect float8_covar_samp float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2829 n 0 float8_regr_accum float8_regr_collect float8_corr float8_regr_combine - - - - - f f 0 1022 1022 0 0 0 "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
/* boolean-and and boolean-or */
- DATA(insert ( 2517 n 0 booland_statefunc booland_statefunc - bool_accum bool_accum_inv bool_alltrue f f 58 16 16 0 2281 16 _null_ _null_ _null_ ));
- DATA(insert ( 2518 n 0 boolor_statefunc boolor_statefunc - bool_accum bool_accum_inv bool_anytrue f f 59 16 16 0 2281 16 _null_ _null_ _null_ ));
- DATA(insert ( 2519 n 0 booland_statefunc booland_statefunc - bool_accum bool_accum_inv bool_alltrue f f 58 16 16 0 2281 16 _null_ _null_ _null_ ));
-DATA(insert ( 2517 n 0 booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 0 2281 16 _null_ _null_ ));
-DATA(insert ( 2518 n 0 boolor_statefunc - boolor_statefunc - - bool_accum bool_accum_inv bool_anytrue f f 59 16 0 2281 16 _null_ _null_ ));
-DATA(insert ( 2519 n 0 booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 0 2281 16 _null_ _null_ ));
++DATA(insert ( 2517 n 0 booland_statefunc booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 16 0 2281 16 _null_ _null_ _null_ ));
++DATA(insert ( 2518 n 0 boolor_statefunc boolor_statefunc - boolor_statefunc - - bool_accum bool_accum_inv bool_anytrue f f 59 16 16 0 2281 16 _null_ _null_ _null_ ));
++DATA(insert ( 2519 n 0 booland_statefunc booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 16 0 2281 16 _null_ _null_ _null_ ));
/* bitwise integer */
- DATA(insert ( 2236 n 0 int2and int2and - - - - f f 0 21 21 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2237 n 0 int2or int2or - - - - f f 0 21 21 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2238 n 0 int4and int4and - - - - f f 0 23 23 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2239 n 0 int4or int4or - - - - f f 0 23 23 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2240 n 0 int8and int8and - - - - f f 0 20 20 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2241 n 0 int8or int8or - - - - f f 0 20 20 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2242 n 0 bitand bitand - - - - f f 0 1560 1560 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 2243 n 0 bitor bitor - - - - f f 0 1560 1560 0 0 0 _null_ _null_ _null_ ));
-DATA(insert ( 2236 n 0 int2and - int2and - - - - - f f 0 21 0 0 0 _null_ _null_ ));
-DATA(insert ( 2237 n 0 int2or - int2or - - - - - f f 0 21 0 0 0 _null_ _null_ ));
-DATA(insert ( 2238 n 0 int4and - int4and - - - - - f f 0 23 0 0 0 _null_ _null_ ));
-DATA(insert ( 2239 n 0 int4or - int4or - - - - - f f 0 23 0 0 0 _null_ _null_ ));
-DATA(insert ( 2240 n 0 int8and - int8and - - - - - f f 0 20 0 0 0 _null_ _null_ ));
-DATA(insert ( 2241 n 0 int8or - int8or - - - - - f f 0 20 0 0 0 _null_ _null_ ));
-DATA(insert ( 2242 n 0 bitand - bitand - - - - - f f 0 1560 0 0 0 _null_ _null_ ));
-DATA(insert ( 2243 n 0 bitor - bitor - - - - - f f 0 1560 0 0 0 _null_ _null_ ));
++DATA(insert ( 2236 n 0 int2and int2and - int2and - - - - - f f 0 21 21 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2237 n 0 int2or int2or - int2or - - - - - f f 0 21 21 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2238 n 0 int4and int4and - int4and - - - - - f f 0 23 23 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2239 n 0 int4or int4or - int4or - - - - - f f 0 23 23 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2240 n 0 int8and int8and - int8and - - - - - f f 0 20 20 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2241 n 0 int8or int8or - int8or - - - - - f f 0 20 20 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2242 n 0 bitand bitand - bitand - - - - - f f 0 1560 1560 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 2243 n 0 bitor bitor - bitor - - - - - f f 0 1560 1560 0 0 0 _null_ _null_ _null_ ));
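/*
 * The boolean-and/or and bitwise rows can reuse the transition function
 * as both collection and combine function: the operator is associative
 * and its state type equals its input type, so, for example,
 *
 *	int4and(int4and(a, b), c) == int4and(a, int4and(b, c))
 *
 * and per-datanode partial states can be merged by the same function
 * that built them.
 */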
/* xml */
- DATA(insert ( 2901 n 0 xmlconcat2 - - - - - f f 0 142 0 0 0 0 _null_ _null_ _null_ ));
-DATA(insert ( 2901 n 0 xmlconcat2 - - - - - - - f f 0 142 0 0 0 _null_ _null_ ));
++DATA(insert ( 2901 n 0 xmlconcat2 - - - - - - - - f f 0 142 0 0 0 0 _null_ _null_ _null_ ));
/* array */
- DATA(insert ( 2335 n 0 array_agg_transfn - array_agg_finalfn - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 4053 n 0 array_agg_array_transfn - array_agg_array_finalfn - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
-DATA(insert ( 2335 n 0 array_agg_transfn array_agg_finalfn - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 4053 n 0 array_agg_array_transfn array_agg_array_finalfn - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
++DATA(insert ( 2335 n 0 array_agg_transfn - array_agg_finalfn - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 4053 n 0 array_agg_array_transfn - array_agg_array_finalfn - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
/* text */
- DATA(insert ( 3538 n 0 string_agg_transfn - string_agg_finalfn - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
-DATA(insert ( 3538 n 0 string_agg_transfn string_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
++DATA(insert ( 3538 n 0 string_agg_transfn - string_agg_finalfn - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
/* bytea */
- DATA(insert ( 3545 n 0 bytea_string_agg_transfn - bytea_string_agg_finalfn - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
-DATA(insert ( 3545 n 0 bytea_string_agg_transfn bytea_string_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
++DATA(insert ( 3545 n 0 bytea_string_agg_transfn - bytea_string_agg_finalfn - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
/* json */
- DATA(insert ( 3175 n 0 json_agg_transfn json_agg_collectfn json_agg_finalfn - - - f f 0 7028 7028 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3197 n 0 json_object_agg_transfn - json_object_agg_finalfn - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
-DATA(insert ( 3175 n 0 json_agg_transfn json_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3197 n 0 json_object_agg_transfn json_object_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
++DATA(insert ( 3175 n 0 json_agg_transfn json_agg_collectfn json_agg_finalfn - - - - - - f f 0 7028 7028 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3197 n 0 json_object_agg_transfn - json_object_agg_finalfn - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
/* jsonb */
- DATA(insert ( 3267 n 0 jsonb_agg_transfn - jsonb_agg_finalfn - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3270 n 0 jsonb_object_agg_transfn - jsonb_object_agg_finalfn - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_));
-DATA(insert ( 3267 n 0 jsonb_agg_transfn jsonb_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3270 n 0 jsonb_object_agg_transfn jsonb_object_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
++DATA(insert ( 3267 n 0 jsonb_agg_transfn - jsonb_agg_finalfn - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3270 n 0 jsonb_object_agg_transfn - jsonb_object_agg_finalfn - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
/* ordered-set and hypothetical-set aggregates */
- DATA(insert ( 3972 o 1 ordered_set_transition - percentile_disc_final - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3974 o 1 ordered_set_transition - percentile_cont_float8_final - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3976 o 1 ordered_set_transition - percentile_cont_interval_final - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3978 o 1 ordered_set_transition - percentile_disc_multi_final - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3980 o 1 ordered_set_transition - percentile_cont_float8_multi_final - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3982 o 1 ordered_set_transition - percentile_cont_interval_multi_final - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3984 o 0 ordered_set_transition - mode_final - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3986 h 1 ordered_set_transition_multi - rank_final - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3988 h 1 ordered_set_transition_multi - percent_rank_final - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3990 h 1 ordered_set_transition_multi - cume_dist_final - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
- DATA(insert ( 3992 h 1 ordered_set_transition_multi - dense_rank_final - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
-DATA(insert ( 3972 o 1 ordered_set_transition percentile_disc_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3974 o 1 ordered_set_transition percentile_cont_float8_final - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3976 o 1 ordered_set_transition percentile_cont_interval_final - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3978 o 1 ordered_set_transition percentile_disc_multi_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3980 o 1 ordered_set_transition percentile_cont_float8_multi_final - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3982 o 1 ordered_set_transition percentile_cont_interval_multi_final - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3984 o 0 ordered_set_transition mode_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3986 h 1 ordered_set_transition_multi rank_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3988 h 1 ordered_set_transition_multi percent_rank_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3990 h 1 ordered_set_transition_multi cume_dist_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3992 h 1 ordered_set_transition_multi dense_rank_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
++DATA(insert ( 3972 o 1 ordered_set_transition - percentile_disc_final - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3974 o 1 ordered_set_transition - percentile_cont_float8_final - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3976 o 1 ordered_set_transition - percentile_cont_interval_final - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3978 o 1 ordered_set_transition - percentile_disc_multi_final - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3980 o 1 ordered_set_transition - percentile_cont_float8_multi_final - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3982 o 1 ordered_set_transition - percentile_cont_interval_multi_final - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3984 o 0 ordered_set_transition - mode_final - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3986 h 1 ordered_set_transition_multi - rank_final - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3988 h 1 ordered_set_transition_multi - percent_rank_final - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3990 h 1 ordered_set_transition_multi - cume_dist_final - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
++DATA(insert ( 3992 h 1 ordered_set_transition_multi - dense_rank_final - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ _null_ ));
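/*
 * Note that the ordered-set and hypothetical-set rows keep "-" for both
 * the collection and the combine columns: their transition state is an
 * accumulated sort buffer, which cannot be meaningfully merged across
 * datanodes, so these aggregates must be evaluated where all the input
 * rows are available.
 */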
/*
* prototypes for functions in pg_aggregate.c
List *parameterDefaults,
Oid variadicArgType,
List *aggtransfnName,
+#ifdef PGXC
+ List *aggcollectfnName,
+#endif
List *aggfinalfnName,
+ List *aggcombinefnName,
+ List *aggserialfnName,
+ List *aggdeserialfnName,
List *aggmtransfnName,
List *aggminvtransfnName,
List *aggmfinalfnName,
Oid aggmTransType,
int32 aggmTransSpace,
const char *agginitval,
- const char *aggminitval);
+#ifdef XCP
+ const char *agginitcollect,
+#endif
+ const char *aggminitval,
+ char proparallel);
#endif /* PG_AGGREGATE_H */
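/*
 * A call-site sketch for the extended AggregateCreate() tail, assuming a
 * caller that already has the upstream argument list (only the trailing
 * arguments are shown; the PGXC/XCP arguments exist only in conditional
 * builds, and the elided parts are unchanged from upstream):
 *
 *	AggregateCreate(... upstream leading arguments ...,
 *					transfnName,
 *	#ifdef PGXC
 *					collectfnName,
 *	#endif
 *					finalfnName,
 *					combinefnName,
 *					serialfnName,
 *					deserialfnName,
 *					mtransfnName, minvtransfnName, mfinalfnName,
 *					... flags and type OIDs ...,
 *					initval,
 *	#ifdef XCP
 *					initcollect,
 *	#endif
 *					minitval,
 *					proparallel);
 */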
* along with the relation's initial contents.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/catalog/pg_namespace.h
* definition of the system "procedure" relation (pg_proc)
* along with the relation's initial contents.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/catalog/pg_proc.h
DESCR("join selectivity of case-insensitive regex non-match");
/* Aggregate-related functions */
- DATA(insert OID = 1830 ( float8_avg PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_avg _null_ _null_ _null_ ));
+ DATA(insert OID = 1830 ( float8_avg PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_avg _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2512 ( float8_var_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_pop _null_ _null_ _null_ ));
+ DATA(insert OID = 2512 ( float8_var_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_pop _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 1831 ( float8_var_samp PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_samp _null_ _null_ _null_ ));
+ DATA(insert OID = 1831 ( float8_var_samp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_samp _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2513 ( float8_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_pop _null_ _null_ _null_ ));
+ DATA(insert OID = 2513 ( float8_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_pop _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 1832 ( float8_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_samp _null_ _null_ _null_ ));
+ DATA(insert OID = 1832 ( float8_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_samp _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum _null_ _null_ _null_ ));
-DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum _null_ _null_ _null_ ));
++DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 2858 ( numeric_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_avg_accum _null_ _null_ _null_ ));
-DATA(insert OID = 3341 ( numeric_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_combine _null_ _null_ _null_ ));
-DESCR("aggregate combine function");
-DATA(insert OID = 2858 ( numeric_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_avg_accum _null_ _null_ _null_ ));
++DATA(insert OID = 2858 ( numeric_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_avg_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3548 ( numeric_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum_inv _null_ _null_ _null_ ));
-DATA(insert OID = 3337 ( numeric_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_combine _null_ _null_ _null_ ));
-DESCR("aggregate combine function");
-DATA(insert OID = 2740 ( numeric_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_serialize _null_ _null_ _null_ ));
-DESCR("aggregate serial function");
-DATA(insert OID = 2741 ( numeric_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_deserialize _null_ _null_ _null_ ));
-DESCR("aggregate deserial function");
-DATA(insert OID = 3335 ( numeric_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_serialize _null_ _null_ _null_ ));
-DESCR("aggregate serial function");
-DATA(insert OID = 3336 ( numeric_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ numeric_deserialize _null_ _null_ _null_ ));
-DESCR("aggregate deserial function");
-DATA(insert OID = 3548 ( numeric_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3548 ( numeric_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 1834 ( int2_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 21" _null_ _null_ _null_ _null_ _null_ int2_accum _null_ _null_ _null_ ));
-DATA(insert OID = 1834 ( int2_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 21" _null_ _null_ _null_ _null_ _null_ int2_accum _null_ _null_ _null_ ));
++DATA(insert OID = 1834 ( int2_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 21" _null_ _null_ _null_ _null_ _null_ int2_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 1835 ( int4_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 23" _null_ _null_ _null_ _null_ _null_ int4_accum _null_ _null_ _null_ ));
-DATA(insert OID = 1835 ( int4_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 23" _null_ _null_ _null_ _null_ _null_ int4_accum _null_ _null_ _null_ ));
++DATA(insert OID = 1835 ( int4_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 23" _null_ _null_ _null_ _null_ _null_ int4_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 1836 ( int8_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 20" _null_ _null_ _null_ _null_ _null_ int8_accum _null_ _null_ _null_ ));
-DATA(insert OID = 1836 ( int8_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_accum _null_ _null_ _null_ ));
++DATA(insert OID = 1836 ( int8_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 20" _null_ _null_ _null_ _null_ _null_ int8_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 2746 ( int8_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum _null_ _null_ _null_ ));
-DATA(insert OID = 3338 ( numeric_poly_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_combine _null_ _null_ _null_ ));
-DESCR("aggregate combine function");
-DATA(insert OID = 3339 ( numeric_poly_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_serialize _null_ _null_ _null_ ));
-DESCR("aggregate serial function");
-DATA(insert OID = 3340 ( numeric_poly_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_deserialize _null_ _null_ _null_ ));
-DESCR("aggregate deserial function");
-DATA(insert OID = 2746 ( int8_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum _null_ _null_ _null_ ));
++DATA(insert OID = 2746 ( int8_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3567 ( int2_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 21" _null_ _null_ _null_ _null_ _null_ int2_accum_inv _null_ _null_ _null_ ));
-DATA(insert OID = 3567 ( int2_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 21" _null_ _null_ _null_ _null_ _null_ int2_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3567 ( int2_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 21" _null_ _null_ _null_ _null_ _null_ int2_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3568 ( int4_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 23" _null_ _null_ _null_ _null_ _null_ int4_accum_inv _null_ _null_ _null_ ));
-DATA(insert OID = 3568 ( int4_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 23" _null_ _null_ _null_ _null_ _null_ int4_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3568 ( int4_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 23" _null_ _null_ _null_ _null_ _null_ int4_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3569 ( int8_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 20" _null_ _null_ _null_ _null_ _null_ int8_accum_inv _null_ _null_ _null_ ));
-DATA(insert OID = 3569 ( int8_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3569 ( int8_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 20" _null_ _null_ _null_ _null_ _null_ int8_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3387 ( int8_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum_inv _null_ _null_ _null_ ));
-DATA(insert OID = 3387 ( int8_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3387 ( int8_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3178 ( numeric_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_sum _null_ _null_ _null_ ));
-DATA(insert OID = 2785 ( int8_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_combine _null_ _null_ _null_ ));
-DESCR("aggregate combine function");
-DATA(insert OID = 2786 ( int8_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ int8_avg_serialize _null_ _null_ _null_ ));
-DESCR("aggregate serial function");
-DATA(insert OID = 2787 ( int8_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_deserialize _null_ _null_ _null_ ));
-DESCR("aggregate deserial function");
-DATA(insert OID = 3324 ( int4_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 1016" _null_ _null_ _null_ _null_ _null_ int4_avg_combine _null_ _null_ _null_ ));
-DESCR("aggregate combine function");
-DATA(insert OID = 3178 ( numeric_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_sum _null_ _null_ _null_ ));
++DATA(insert OID = 3178 ( numeric_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_sum _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 1837 ( numeric_avg PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_avg _null_ _null_ _null_ ));
-DATA(insert OID = 1837 ( numeric_avg PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_avg _null_ _null_ _null_ ));
++DATA(insert OID = 1837 ( numeric_avg PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_avg _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2514 ( numeric_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_var_pop _null_ _null_ _null_ ));
-DATA(insert OID = 2514 ( numeric_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_var_pop _null_ _null_ _null_ ));
++DATA(insert OID = 2514 ( numeric_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_var_pop _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 1838 ( numeric_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_var_samp _null_ _null_ _null_ ));
-DATA(insert OID = 1838 ( numeric_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_var_samp _null_ _null_ _null_ ));
++DATA(insert OID = 1838 ( numeric_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_var_samp _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2596 ( numeric_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_stddev_pop _null_ _null_ _null_ ));
-DATA(insert OID = 2596 ( numeric_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_stddev_pop _null_ _null_ _null_ ));
++DATA(insert OID = 2596 ( numeric_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_stddev_pop _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 1839 ( numeric_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_stddev_samp _null_ _null_ _null_ ));
-DATA(insert OID = 1839 ( numeric_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_stddev_samp _null_ _null_ _null_ ));
++DATA(insert OID = 1839 ( numeric_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_stddev_samp _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 1840 ( int2_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 20 "20 21" _null_ _null_ _null_ _null_ _null_ int2_sum _null_ _null_ _null_ ));
+ DATA(insert OID = 1840 ( int2_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 20 "20 21" _null_ _null_ _null_ _null_ _null_ int2_sum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 1841 ( int4_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int4_sum _null_ _null_ _null_ ));
+ DATA(insert OID = 1841 ( int4_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int4_sum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 1842 ( int8_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 1700 "1700 20" _null_ _null_ _null_ _null_ _null_ int8_sum _null_ _null_ _null_ ));
+ DATA(insert OID = 1842 ( int8_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 1700 "1700 20" _null_ _null_ _null_ _null_ _null_ int8_sum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3388 ( numeric_poly_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_sum _null_ _null_ _null_ ));
-DATA(insert OID = 3388 ( numeric_poly_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_sum _null_ _null_ _null_ ));
++DATA(insert OID = 3388 ( numeric_poly_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_sum _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3389 ( numeric_poly_avg PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_avg _null_ _null_ _null_ ));
-DATA(insert OID = 3389 ( numeric_poly_avg PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_avg _null_ _null_ _null_ ));
++DATA(insert OID = 3389 ( numeric_poly_avg PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_avg _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3390 ( numeric_poly_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
-DATA(insert OID = 3390 ( numeric_poly_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
++DATA(insert OID = 3390 ( numeric_poly_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3391 ( numeric_poly_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_samp _null_ _null_ _null_ ));
-DATA(insert OID = 3391 ( numeric_poly_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_samp _null_ _null_ _null_ ));
++DATA(insert OID = 3391 ( numeric_poly_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_samp _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3392 ( numeric_poly_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_pop _null_ _null_ _null_ ));
-DATA(insert OID = 3392 ( numeric_poly_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_pop _null_ _null_ _null_ ));
++DATA(insert OID = 3392 ( numeric_poly_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_pop _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3393 ( numeric_poly_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_samp _null_ _null_ _null_ ));
-DATA(insert OID = 3393 ( numeric_poly_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_samp _null_ _null_ _null_ ));
++DATA(insert OID = 3393 ( numeric_poly_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_samp _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 1843 ( interval_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum _null_ _null_ _null_ ));
+ DATA(insert OID = 1843 ( interval_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3549 ( interval_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum_inv _null_ _null_ _null_ ));
+ DATA(insert OID = 3325 ( interval_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_combine _null_ _null_ _null_ ));
+ DESCR("aggregate combine function");
+ DATA(insert OID = 3549 ( interval_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 1844 ( interval_avg PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1186 "1187" _null_ _null_ _null_ _null_ _null_ interval_avg _null_ _null_ _null_ ));
+ DATA(insert OID = 1844 ( interval_avg PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1186 "1187" _null_ _null_ _null_ _null_ _null_ interval_avg _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 1962 ( int2_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum _null_ _null_ _null_ ));
+ DATA(insert OID = 1962 ( int2_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 1963 ( int4_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum _null_ _null_ _null_ ));
+ DATA(insert OID = 1963 ( int4_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3570 ( int2_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum_inv _null_ _null_ _null_ ));
+ DATA(insert OID = 3570 ( int2_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3571 ( int4_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum_inv _null_ _null_ _null_ ));
+ DATA(insert OID = 3571 ( int4_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 1964 ( int8_avg PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1700 "1016" _null_ _null_ _null_ _null_ _null_ int8_avg _null_ _null_ _null_ ));
+ DATA(insert OID = 1964 ( int8_avg PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1016" _null_ _null_ _null_ _null_ _null_ int8_avg _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3572 ( int2int4_sum PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "1016" _null_ _null_ _null_ _null_ _null_ int2int4_sum _null_ _null_ _null_ ));
+ DATA(insert OID = 3572 ( int2int4_sum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "1016" _null_ _null_ _null_ _null_ _null_ int2int4_sum _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2805 ( int8inc_float8_float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 20 "20 701 701" _null_ _null_ _null_ _null_ _null_ int8inc_float8_float8 _null_ _null_ _null_ ));
+ DATA(insert OID = 2805 ( int8inc_float8_float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 20 "20 701 701" _null_ _null_ _null_ _null_ _null_ int8inc_float8_float8 _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 2806 ( float8_regr_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 1022 "1022 701 701" _null_ _null_ _null_ _null_ _null_ float8_regr_accum _null_ _null_ _null_ ));
+ DATA(insert OID = 2806 ( float8_regr_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1022 "1022 701 701" _null_ _null_ _null_ _null_ _null_ float8_regr_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 2807 ( float8_regr_sxx PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxx _null_ _null_ _null_ ));
+ DATA(insert OID = 3342 ( float8_regr_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_regr_combine _null_ _null_ _null_ ));
+ DESCR("aggregate combine function");
+ DATA(insert OID = 2807 ( float8_regr_sxx PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxx _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2808 ( float8_regr_syy PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_syy _null_ _null_ _null_ ));
+ DATA(insert OID = 2808 ( float8_regr_syy PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_syy _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2809 ( float8_regr_sxy PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxy _null_ _null_ _null_ ));
+ DATA(insert OID = 2809 ( float8_regr_sxy PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxy _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2810 ( float8_regr_avgx PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgx _null_ _null_ _null_ ));
+ DATA(insert OID = 2810 ( float8_regr_avgx PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgx _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2811 ( float8_regr_avgy PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgy _null_ _null_ _null_ ));
+ DATA(insert OID = 2811 ( float8_regr_avgy PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgy _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2812 ( float8_regr_r2 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_r2 _null_ _null_ _null_ ));
+ DATA(insert OID = 2812 ( float8_regr_r2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_r2 _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2813 ( float8_regr_slope PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_slope _null_ _null_ _null_ ));
+ DATA(insert OID = 2813 ( float8_regr_slope PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_slope _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2814 ( float8_regr_intercept PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_intercept _null_ _null_ _null_ ));
+ DATA(insert OID = 2814 ( float8_regr_intercept PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_intercept _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2815 ( float8_covar_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_pop _null_ _null_ _null_ ));
+ DATA(insert OID = 2815 ( float8_covar_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_pop _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2816 ( float8_covar_samp PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_samp _null_ _null_ _null_ ));
+ DATA(insert OID = 2816 ( float8_covar_samp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_samp _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 2817 ( float8_corr PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_corr _null_ _null_ _null_ ));
+ DATA(insert OID = 2817 ( float8_corr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_corr _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3535 ( string_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 2281 "2281 25 25" _null_ _null_ _null_ _null_ _null_ string_agg_transfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3535 ( string_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 2281 "2281 25 25" _null_ _null_ _null_ _null_ _null_ string_agg_transfn _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 7000 ( float8_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_collect _null_ _null_ _null_ ));
+#ifdef PGXC
- DATA(insert OID = 7002 ( numeric_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 7018 "7018 7018" _null_ _null_ _null_ _null_ _null_ numeric_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7000 ( float8_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_collect _null_ _null_ _null_ ));
+DESCR("aggregate collection function");
- DATA(insert OID = 7013 ( numeric_poly_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 7019 "7019 7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7002 ( numeric_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 7018 "7018 7018" _null_ _null_ _null_ _null_ _null_ numeric_collect _null_ _null_ _null_ ));
+DESCR("aggregate collection function");
- DATA(insert OID = 7003 ( interval_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7013 ( numeric_poly_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 7019 "7019 7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_collect _null_ _null_ _null_ ));
+DESCR("aggregate poly_collection function");
- DATA(insert OID = 7004 ( int8_avg_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 1016" _null_ _null_ _null_ _null_ _null_ int8_avg_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7003 ( interval_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_collect _null_ _null_ _null_ ));
+DESCR("aggregate transition function");
- DATA(insert OID = 7005 ( int8_sum_to_int8 PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8_sum_to_int8 _null_ _null_ _null_ ));
++DATA(insert OID = 7004 ( int8_avg_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 1016" _null_ _null_ _null_ _null_ _null_ int8_avg_collect _null_ _null_ _null_ ));
+DESCR("AVG(int) collection function");
- DATA(insert OID = 7006 ( float8_regr_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_regr_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7005 ( int8_sum_to_int8 PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8_sum_to_int8 _null_ _null_ _null_ ));
+DESCR("SUM(int) collection function");
- DATA(insert OID = 3536 ( string_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 25 "2281" _null_ _null_ _null_ _null_ _null_ string_agg_finalfn _null_ _null_ _null_ ));
++DATA(insert OID = 7006 ( float8_regr_collect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_regr_collect _null_ _null_ _null_ ));
+DESCR("REGR_...(double, double) collection function");
+#endif
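/*
 * Every collection function in the block above has the same shape:
 * collect(collecttype, collecttype) -> collecttype, so the coordinator
 * can fold per-datanode results pairwise.  A skeleton of that shape
 * (hypothetical body; the real implementations live in the backend's
 * adt code):
 *
 *	Datum
 *	float8_collect(PG_FUNCTION_ARGS)
 *	{
 *		ArrayType  *a = PG_GETARG_ARRAYTYPE_P(0);	-- running state
 *		ArrayType  *b = PG_GETARG_ARRAYTYPE_P(1);	-- one node's state
 *
 *		-- element-wise merge of {N, sum(X), sum(X*X)} ...
 *		PG_RETURN_ARRAYTYPE_P(a);
 *	}
 */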
+ DATA(insert OID = 3536 ( string_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 25 "2281" _null_ _null_ _null_ _null_ _null_ string_agg_finalfn _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3538 ( string_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3538 ( string_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("concatenate aggregate input into a string");
- DATA(insert OID = 3543 ( bytea_string_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 2281 "2281 17 17" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_transfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3543 ( bytea_string_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 2281 "2281 17 17" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_transfn _null_ _null_ _null_ ));
DESCR("aggregate transition function");
- DATA(insert OID = 3544 ( bytea_string_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_finalfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3544 ( bytea_string_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_finalfn _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3545 ( string_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0 17 "17 17" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3545 ( string_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 17 "17 17" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("concatenate aggregate input into a bytea");
/* To ASCII conversion */
DESCR("determine if a string is well formed XML content");
/* json */
- DATA(insert OID = 321 ( json_in PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "2275" _null_ _null_ _null_ _null_ _null_ json_in _null_ _null_ _null_ ));
+ DATA(insert OID = 321 ( json_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "2275" _null_ _null_ _null_ _null_ _null_ json_in _null_ _null_ _null_ ));
DESCR("I/O");
- DATA(insert OID = 322 ( json_out PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "114" _null_ _null_ _null_ _null_ _null_ json_out _null_ _null_ _null_ ));
+ DATA(insert OID = 322 ( json_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "114" _null_ _null_ _null_ _null_ _null_ json_out _null_ _null_ _null_ ));
DESCR("I/O");
- DATA(insert OID = 323 ( json_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_recv _null_ _null_ _null_ ));
+ DATA(insert OID = 323 ( json_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_recv _null_ _null_ _null_ ));
DESCR("I/O");
- DATA(insert OID = 324 ( json_send PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "114" _null_ _null_ _null_ _null_ _null_ json_send _null_ _null_ _null_ ));
+ DATA(insert OID = 324 ( json_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "114" _null_ _null_ _null_ _null_ _null_ json_send _null_ _null_ _null_ ));
DESCR("I/O");
- DATA(insert OID = 3153 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2277" _null_ _null_ _null_ _null_ _null_ array_to_json _null_ _null_ _null_ ));
+ DATA(insert OID = 3153 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 114 "2277" _null_ _null_ _null_ _null_ _null_ array_to_json _null_ _null_ _null_ ));
DESCR("map array to json");
- DATA(insert OID = 3154 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "2277 16" _null_ _null_ _null_ _null_ _null_ array_to_json_pretty _null_ _null_ _null_ ));
+ DATA(insert OID = 3154 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 114 "2277 16" _null_ _null_ _null_ _null_ _null_ array_to_json_pretty _null_ _null_ _null_ ));
DESCR("map array to json with optional pretty printing");
- DATA(insert OID = 3155 ( row_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2249" _null_ _null_ _null_ _null_ _null_ row_to_json _null_ _null_ _null_ ));
+ DATA(insert OID = 3155 ( row_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 114 "2249" _null_ _null_ _null_ _null_ _null_ row_to_json _null_ _null_ _null_ ));
DESCR("map row to json");
- DATA(insert OID = 3156 ( row_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "2249 16" _null_ _null_ _null_ _null_ _null_ row_to_json_pretty _null_ _null_ _null_ ));
+ DATA(insert OID = 3156 ( row_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 114 "2249 16" _null_ _null_ _null_ _null_ _null_ row_to_json_pretty _null_ _null_ _null_ ));
DESCR("map row to json with optional pretty printing");
- DATA(insert OID = 3173 ( json_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s 2 0 7028 "7028 2283" _null_ _null_ _null_ _null_ _null_ json_agg_transfn _null_ _null_ _null_ ));
-DATA(insert OID = 3173 ( json_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ _null_ json_agg_transfn _null_ _null_ _null_ ));
++DATA(insert OID = 3173 ( json_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 7028 "7028 2283" _null_ _null_ _null_ _null_ _null_ json_agg_transfn _null_ _null_ _null_ ));
DESCR("json aggregate transition function");
- DATA(insert OID = 7029 ( json_agg_collectfn PGNSP PGUID 12 1 0 0 0 f f f f f f s 2 0 7028 "7028 7028" _null_ _null_ _null_ _null_ _null_ json_agg_collectfn _null_ _null_ _null_ ));
-DATA(insert OID = 3174 ( json_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_agg_finalfn _null_ _null_ _null_ ));
++DATA(insert OID = 7029 ( json_agg_collectfn PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 7028 "7028 7028" _null_ _null_ _null_ _null_ _null_ json_agg_collectfn _null_ _null_ _null_ ));
+DESCR("json aggregate collection function");
- DATA(insert OID = 3174 ( json_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 114 "7028" _null_ _null_ _null_ _null_ _null_ json_agg_finalfn _null_ _null_ _null_ ));
++DATA(insert OID = 3174 ( json_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 114 "7028" _null_ _null_ _null_ _null_ _null_ json_agg_finalfn _null_ _null_ _null_ ));
DESCR("json aggregate final function");
- DATA(insert OID = 3175 ( json_agg PGNSP PGUID 12 1 0 0 0 t f f f f f s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3175 ( json_agg PGNSP PGUID 12 1 0 0 0 t f f f f f s s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("aggregate input into json");
- DATA(insert OID = 3180 ( json_object_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s 3 0 2281 "2281 2276 2276" _null_ _null_ _null_ _null_ _null_ json_object_agg_transfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3180 ( json_object_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s s 3 0 2281 "2281 2276 2276" _null_ _null_ _null_ _null_ _null_ json_object_agg_transfn _null_ _null_ _null_ ));
DESCR("json object aggregate transition function");
- DATA(insert OID = 3196 ( json_object_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_object_agg_finalfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3196 ( json_object_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_object_agg_finalfn _null_ _null_ _null_ ));
DESCR("json object aggregate final function");
- DATA(insert OID = 3197 ( json_object_agg PGNSP PGUID 12 1 0 0 0 t f f f f f s 2 0 114 "2276 2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3197 ( json_object_agg PGNSP PGUID 12 1 0 0 0 t f f f f f s s 2 0 114 "2276 2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("aggregate input into a json object");
- DATA(insert OID = 3198 ( json_build_array PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_array _null_ _null_ _null_ ));
+ DATA(insert OID = 3198 ( json_build_array PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_array _null_ _null_ _null_ ));
DESCR("build a json array from any inputs");
- DATA(insert OID = 3199 ( json_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 114 "" _null_ _null_ _null_ _null_ _null_ json_build_array_noargs _null_ _null_ _null_ ));
+ DATA(insert OID = 3199 ( json_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 114 "" _null_ _null_ _null_ _null_ _null_ json_build_array_noargs _null_ _null_ _null_ ));
DESCR("build an empty json array");
- DATA(insert OID = 3200 ( json_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_object _null_ _null_ _null_ ));
+ DATA(insert OID = 3200 ( json_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_object _null_ _null_ _null_ ));
DESCR("build a json object from pairwise key/value inputs");
- DATA(insert OID = 3201 ( json_build_object PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 114 "" _null_ _null_ _null_ _null_ _null_ json_build_object_noargs _null_ _null_ _null_ ));
+ DATA(insert OID = 3201 ( json_build_object PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 114 "" _null_ _null_ _null_ _null_ _null_ json_build_object_noargs _null_ _null_ _null_ ));
DESCR("build an empty json object");
- DATA(insert OID = 3202 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "1009" _null_ _null_ _null_ _null_ _null_ json_object _null_ _null_ _null_ ));
+ DATA(insert OID = 3202 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "1009" _null_ _null_ _null_ _null_ _null_ json_object _null_ _null_ _null_ ));
DESCR("map text array of key value pairs to json object");
- DATA(insert OID = 3203 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "1009 1009" _null_ _null_ _null_ _null_ _null_ json_object_two_arg _null_ _null_ _null_ ));
+ DATA(insert OID = 3203 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 114 "1009 1009" _null_ _null_ _null_ _null_ _null_ json_object_two_arg _null_ _null_ _null_ ));
DESCR("map text arrays of keys and values to json object");
- DATA(insert OID = 3176 ( to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ to_json _null_ _null_ _null_ ));
+ DATA(insert OID = 3176 ( to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ to_json _null_ _null_ _null_ ));
DESCR("map input to json");
- DATA(insert OID = 3261 ( json_strip_nulls PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "114" _null_ _null_ _null_ _null_ _null_ json_strip_nulls _null_ _null_ _null_ ));
+ DATA(insert OID = 3261 ( json_strip_nulls PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "114" _null_ _null_ _null_ _null_ _null_ json_strip_nulls _null_ _null_ _null_ ));
DESCR("remove object fields with null values from json");
- DATA(insert OID = 3947 ( json_object_field PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field _null_ _null_ _null_ ));
- DATA(insert OID = 3948 ( json_object_field_text PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25 "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field_text _null_ _null_ _null_ ));
- DATA(insert OID = 3949 ( json_array_element PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element _null_ _null_ _null_ ));
- DATA(insert OID = 3950 ( json_array_element_text PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25 "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element_text _null_ _null_ _null_ ));
- DATA(insert OID = 3951 ( json_extract_path PGNSP PGUID 12 1 0 25 0 f f f f t f i 2 0 114 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path _null_ _null_ _null_ ));
+ DATA(insert OID = 3947 ( json_object_field PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 114 "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field _null_ _null_ _null_ ));
+ DATA(insert OID = 3948 ( json_object_field_text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3949 ( json_array_element PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 114 "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element _null_ _null_ _null_ ));
+ DATA(insert OID = 3950 ( json_array_element_text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3951 ( json_extract_path PGNSP PGUID 12 1 0 25 0 f f f f t f i s 2 0 114 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path _null_ _null_ _null_ ));
DESCR("get value from json with path elements");
- DATA(insert OID = 3953 ( json_extract_path_text PGNSP PGUID 12 1 0 25 0 f f f f t f i 2 0 25 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3953 ( json_extract_path_text PGNSP PGUID 12 1 0 25 0 f f f f t f i s 2 0 25 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path_text _null_ _null_ _null_ ));
DESCR("get value from json as text with path elements");
- DATA(insert OID = 3955 ( json_array_elements PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 114 "114" "{114,114}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements _null_ _null_ _null_ ));
+ DATA(insert OID = 3955 ( json_array_elements PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 114 "114" "{114,114}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements _null_ _null_ _null_ ));
DESCR("key value pairs of a json object");
- DATA(insert OID = 3969 ( json_array_elements_text PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 25 "114" "{114,25}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3969 ( json_array_elements_text PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 25 "114" "{114,25}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements_text _null_ _null_ _null_ ));
DESCR("elements of json array");
- DATA(insert OID = 3956 ( json_array_length PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "114" _null_ _null_ _null_ _null_ _null_ json_array_length _null_ _null_ _null_ ));
+ DATA(insert OID = 3956 ( json_array_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "114" _null_ _null_ _null_ _null_ _null_ json_array_length _null_ _null_ _null_ ));
DESCR("length of json array");
- DATA(insert OID = 3957 ( json_object_keys PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_object_keys _null_ _null_ _null_ ));
+ DATA(insert OID = 3957 ( json_object_keys PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_object_keys _null_ _null_ _null_ ));
DESCR("get json object keys");
- DATA(insert OID = 3958 ( json_each PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,114}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each _null_ _null_ _null_ ));
+ DATA(insert OID = 3958 ( json_each PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 2249 "114" "{114,25,114}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each _null_ _null_ _null_ ));
DESCR("key value pairs of a json object");
- DATA(insert OID = 3959 ( json_each_text PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,25}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3959 ( json_each_text PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 2249 "114" "{114,25,25}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each_text _null_ _null_ _null_ ));
DESCR("key value pairs of a json object");
- DATA(insert OID = 3960 ( json_populate_record PGNSP PGUID 12 1 0 0 0 f f f f f f s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_record _null_ _null_ _null_ ));
+ DATA(insert OID = 3960 ( json_populate_record PGNSP PGUID 12 1 0 0 0 f f f f f f s s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_record _null_ _null_ _null_ ));
DESCR("get record fields from a json object");
- DATA(insert OID = 3961 ( json_populate_recordset PGNSP PGUID 12 1 100 0 0 f f f f f t s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_recordset _null_ _null_ _null_ ));
+ DATA(insert OID = 3961 ( json_populate_recordset PGNSP PGUID 12 1 100 0 0 f f f f f t s s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_recordset _null_ _null_ _null_ ));
DESCR("get set of records with fields from a json array of objects");
- DATA(insert OID = 3204 ( json_to_record PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_record _null_ _null_ _null_ ));
+ DATA(insert OID = 3204 ( json_to_record PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_record _null_ _null_ _null_ ));
DESCR("get record fields from a json object");
- DATA(insert OID = 3205 ( json_to_recordset PGNSP PGUID 12 1 100 0 0 f f f f f t s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_recordset _null_ _null_ _null_ ));
+ DATA(insert OID = 3205 ( json_to_recordset PGNSP PGUID 12 1 100 0 0 f f f f f t s s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_recordset _null_ _null_ _null_ ));
DESCR("get set of records with fields from a json array of objects");
- DATA(insert OID = 3968 ( json_typeof PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_typeof _null_ _null_ _null_ ));
+ DATA(insert OID = 3968 ( json_typeof PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_typeof _null_ _null_ _null_ ));
DESCR("get the type of a json value");
/* uuid */
DESCR("aggregate final function");
/* hypothetical-set aggregates (and their support functions) */
- DATA(insert OID = 3986 ( rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3986 ( rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("rank of hypothetical row");
- DATA(insert OID = 3987 ( rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_rank_final _null_ _null_ _null_ ));
+ DATA(insert OID = 3987 ( rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_rank_final _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3988 ( percent_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3988 ( percent_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("fractional rank of hypothetical row");
- DATA(insert OID = 3989 ( percent_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_percent_rank_final _null_ _null_ _null_ ));
+ DATA(insert OID = 3989 ( percent_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_percent_rank_final _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3990 ( cume_dist PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3990 ( cume_dist PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("cumulative distribution of hypothetical row");
- DATA(insert OID = 3991 ( cume_dist_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_cume_dist_final _null_ _null_ _null_ ));
+ DATA(insert OID = 3991 ( cume_dist_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_cume_dist_final _null_ _null_ _null_ ));
DESCR("aggregate final function");
- DATA(insert OID = 3992 ( dense_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3992 ( dense_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("rank of hypothetical row without gaps");
- DATA(insert OID = 3993 ( dense_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_dense_rank_final _null_ _null_ _null_ ));
+ DATA(insert OID = 3993 ( dense_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_dense_rank_final _null_ _null_ _null_ ));
DESCR("aggregate final function");
+#ifdef PGXC
+DATA(insert OID = 7007 ( pgxc_pool_check PGNSP PGUID 12 1 0 0 0 f f f f t f v r 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pgxc_pool_check _null_ _null_ _null_ ));
+DESCR("check connection information consistency in pooler");
+DATA(insert OID = 7008 ( pgxc_pool_reload PGNSP PGUID 12 1 0 0 0 f f f f t f v u 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pgxc_pool_reload _null_ _null_ _null_ ));
+DESCR("reload connection information in pooler and reload server sessions");
+DATA(insert OID = 7009 ( pgxc_node_str PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ pgxc_node_str _null_ _null_ _null_ ));
+DESCR("get the name of the node");
+DATA(insert OID = 7010 ( pgxc_is_committed PGNSP PGUID 12 1 1 0 0 f f f f t t s s 1 0 16 "28" _null_ _null_ _null_ _null_ _null_ pgxc_is_committed _null_ _null_ _null_ ));
+DESCR("is given GXID committed or aborted?");
+DATA(insert OID = 7024 ( pgxc_is_inprogress PGNSP PGUID 12 1 1 0 0 f f f f t t s s 1 0 16 "28" _null_ _null_ _null_ _null_ _null_ pgxc_is_inprogress _null_ _null_ _null_ ));
+DESCR("is given GXID in progress?");
+DATA(insert OID = 7011 ( pgxc_lock_for_backup PGNSP PGUID 12 1 0 0 0 f f f f t f v u 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pgxc_lock_for_backup _null_ _null_ _null_ ));
+DESCR("lock the cluster for taking a backup");
+DATA(insert OID = 7014 ( numeric_agg_state_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 7018 "2275" _null_ _null_ _null_ _null_ _null_ numeric_agg_state_in _null_ _null_ _null_ ));
+DESCR("I/O");
+DATA(insert OID = 7015 ( numeric_agg_state_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "7018" _null_ _null_ _null_ _null_ _null_ numeric_agg_state_out _null_ _null_ _null_ ));
+DESCR("I/O");
+DATA(insert OID = 7016 ( numeric_agg_state_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 7018 "2281" _null_ _null_ _null_ _null_ _null_ numeric_agg_state_recv _null_ _null_ _null_ ));
+DESCR("I/O");
+DATA(insert OID = 7017 ( numeric_agg_state_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "7018" _null_ _null_ _null_ _null_ _null_ numeric_agg_state_send _null_ _null_ _null_ ));
+DESCR("I/O");
+DATA(insert OID = 7020 ( numeric_poly_agg_state_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 7019 "2275" _null_ _null_ _null_ _null_ _null_ numeric_poly_agg_state_in _null_ _null_ _null_ ));
+DESCR("I/O");
+DATA(insert OID = 7021 ( numeric_poly_agg_state_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_agg_state_out _null_ _null_ _null_ ));
+DESCR("I/O");
+DATA(insert OID = 7022 ( numeric_poly_agg_state_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 7019 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_agg_state_recv _null_ _null_ _null_ ));
+DESCR("I/O");
+DATA(insert OID = 7023 ( numeric_poly_agg_state_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_agg_state_send _null_ _null_ _null_ ));
+DESCR("I/O");
+DATA(insert OID = 7030 ( json_agg_state_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 7028 "2275" _null_ _null_ _null_ _null_ _null_ json_agg_state_in _null_ _null_ _null_ ));
+DESCR("I/O");
+DATA(insert OID = 7025 ( json_agg_state_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "7028" _null_ _null_ _null_ _null_ _null_ json_agg_state_out _null_ _null_ _null_ ));
+DESCR("I/O");
+#endif
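/*
 * Each DATA() entry above resolves to a C function using the fmgr V1
 * calling convention.  A minimal sketch for the OID 7007 entry, which
 * returns boolean (prorettype 16) -- the body and the pooler helper named
 * here are assumptions, not part of this patch:
 *
 *     Datum
 *     pgxc_pool_check(PG_FUNCTION_ARGS)
 *     {
 *         PG_RETURN_BOOL(PoolManagerCheckConnectionInfo());
 *     }
 */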
/* pg_upgrade support */
- DATA(insert OID = 3582 ( binary_upgrade_set_next_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_type_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3582 ( binary_upgrade_set_next_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_type_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
- DATA(insert OID = 3584 ( binary_upgrade_set_next_array_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_array_pg_type_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3584 ( binary_upgrade_set_next_array_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_array_pg_type_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
- DATA(insert OID = 3585 ( binary_upgrade_set_next_toast_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_type_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3585 ( binary_upgrade_set_next_toast_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_type_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
- DATA(insert OID = 3586 ( binary_upgrade_set_next_heap_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_heap_pg_class_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3586 ( binary_upgrade_set_next_heap_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_heap_pg_class_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
- DATA(insert OID = 3587 ( binary_upgrade_set_next_index_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_index_pg_class_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3587 ( binary_upgrade_set_next_index_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_index_pg_class_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
- DATA(insert OID = 3588 ( binary_upgrade_set_next_toast_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_class_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3588 ( binary_upgrade_set_next_toast_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_class_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
- DATA(insert OID = 3589 ( binary_upgrade_set_next_pg_enum_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_enum_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3589 ( binary_upgrade_set_next_pg_enum_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_enum_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
- DATA(insert OID = 3590 ( binary_upgrade_set_next_pg_authid_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_authid_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3590 ( binary_upgrade_set_next_pg_authid_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_authid_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
- DATA(insert OID = 3591 ( binary_upgrade_create_empty_extension PGNSP PGUID 12 1 0 0 0 f f f f f f v 7 0 2278 "25 25 16 25 1028 1009 1009" _null_ _null_ _null_ _null_ _null_ binary_upgrade_create_empty_extension _null_ _null_ _null_ ));
+ DATA(insert OID = 3591 ( binary_upgrade_create_empty_extension PGNSP PGUID 12 1 0 0 0 f f f f f f v r 7 0 2278 "25 25 16 25 1028 1009 1009" _null_ _null_ _null_ _null_ _null_ binary_upgrade_create_empty_extension _null_ _null_ _null_ ));
+ DESCR("for use by pg_upgrade");
+ DATA(insert OID = 4083 ( binary_upgrade_set_record_init_privs PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "16" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_record_init_privs _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
/* replication/origin.h */
- DATA(insert OID = 6003 ( pg_replication_origin_create PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_create _null_ _null_ _null_ ));
+ DATA(insert OID = 6003 ( pg_replication_origin_create PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_create _null_ _null_ _null_ ));
DESCR("create a replication origin");
- DATA(insert OID = 6004 ( pg_replication_origin_drop PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_drop _null_ _null_ _null_ ));
+ DATA(insert OID = 6004 ( pg_replication_origin_drop PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_drop _null_ _null_ _null_ ));
DESCR("drop replication origin identified by its name");
- DATA(insert OID = 6005 ( pg_replication_origin_oid PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 6005 ( pg_replication_origin_oid PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_oid _null_ _null_ _null_ ));
DESCR("translate the replication origin's name to its id");
- DATA(insert OID = 6006 ( pg_replication_origin_session_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_setup _null_ _null_ _null_ ));
+ DATA(insert OID = 6006 ( pg_replication_origin_session_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_setup _null_ _null_ _null_ ));
DESCR("configure session to maintain replication progress tracking for the passed in origin");
- DATA(insert OID = 6007 ( pg_replication_origin_session_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_reset _null_ _null_ _null_ ));
+ DATA(insert OID = 6007 ( pg_replication_origin_session_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v u 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_reset _null_ _null_ _null_ ));
DESCR("teardown configured replication progress tracking");
- DATA(insert OID = 6008 ( pg_replication_origin_session_is_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_is_setup _null_ _null_ _null_ ));
+ DATA(insert OID = 6008 ( pg_replication_origin_session_is_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v r 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_is_setup _null_ _null_ _null_ ));
DESCR("is a replication origin configured in this session");
- DATA(insert OID = 6009 ( pg_replication_origin_session_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 3220 "16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_progress _null_ _null_ _null_ ));
+ DATA(insert OID = 6009 ( pg_replication_origin_session_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 3220 "16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_progress _null_ _null_ _null_ ));
DESCR("get the replication progress of the current session");
- DATA(insert OID = 6010 ( pg_replication_origin_xact_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2278 "3220 1184" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_xact_setup _null_ _null_ _null_ ));
+ DATA(insert OID = 6010 ( pg_replication_origin_xact_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v r 2 0 2278 "3220 1184" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_xact_setup _null_ _null_ _null_ ));
DESCR("setup the transaction's origin lsn and timestamp");
- DATA(insert OID = 6011 ( pg_replication_origin_xact_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2278 "3220 1184" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_xact_reset _null_ _null_ _null_ ));
+ DATA(insert OID = 6011 ( pg_replication_origin_xact_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v r 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_xact_reset _null_ _null_ _null_ ));
DESCR("reset the transaction's origin lsn and timestamp");
- DATA(insert OID = 6012 ( pg_replication_origin_advance PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2278 "25 3220" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_advance _null_ _null_ _null_ ));
+ DATA(insert OID = 6012 ( pg_replication_origin_advance PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 2278 "25 3220" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_advance _null_ _null_ _null_ ));
DESCR("advance replication itentifier to specific location");
- DATA(insert OID = 6013 ( pg_replication_origin_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 3220 "25 16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_progress _null_ _null_ _null_ ));
+ DATA(insert OID = 6013 ( pg_replication_origin_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 3220 "25 16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_progress _null_ _null_ _null_ ));
DESCR("get an individual replication origin's replication progress");
- DATA(insert OID = 6014 ( pg_show_replication_origin_status PGNSP PGUID 12 1 100 0 0 f f f f f t v 0 0 2249 "" "{26,25,3220,3220}" "{o,o,o,o}" "{local_id, external_id, remote_lsn, local_lsn}" _null_ _null_ pg_show_replication_origin_status _null_ _null_ _null_ ));
+ DATA(insert OID = 6014 ( pg_show_replication_origin_status PGNSP PGUID 12 1 100 0 0 f f f f f t v r 0 0 2249 "" "{26,25,3220,3220}" "{o,o,o,o}" "{local_id, external_id, remote_lsn, local_lsn}" _null_ _null_ pg_show_replication_origin_status _null_ _null_ _null_ ));
DESCR("get progress for all replication origins");
- DATA(insert OID = 6015 ( pg_msgmodule_set PGNSP PGUID 12 1 1 0 0 f f f f t t i 4 0 16 "20 20 20 2275" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_set _null_ _null_ _null_ ));
+#ifdef USE_MODULE_MSGIDS
- DATA(insert OID = 6016 ( pg_msgmodule_change PGNSP PGUID 12 1 1 0 0 f f f f t t i 4 0 16 "20 20 20 20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_change _null_ _null_ _null_ ));
++DATA(insert OID = 6015 ( pg_msgmodule_set PGNSP PGUID 12 1 1 0 0 f f f f t t i s 4 0 16 "20 20 20 2275" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_set _null_ _null_ _null_ ));
+DESCR("set debugging level for module/file/msg");
- DATA(insert OID = 6017 ( pg_msgmodule_enable PGNSP PGUID 12 1 1 0 0 f f f f t t i 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_enable _null_ _null_ _null_ ));
++DATA(insert OID = 6016 ( pg_msgmodule_change PGNSP PGUID 12 1 1 0 0 f f f f t t i s 4 0 16 "20 20 20 20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_change _null_ _null_ _null_ ));
+DESCR("change debugging level for module/file/msg");
- DATA(insert OID = 6018 ( pg_msgmodule_disable PGNSP PGUID 12 1 1 0 0 f f f f t t i 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_disable _null_ _null_ _null_ ));
++DATA(insert OID = 6017 ( pg_msgmodule_enable PGNSP PGUID 12 1 1 0 0 f f f f t t i s 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_enable _null_ _null_ _null_ ));
+DESCR("pid to honour overriden log levels");
- DATA(insert OID = 6019 ( pg_msgmodule_enable_all PGNSP PGUID 12 1 1 0 0 f f f f t t i 1 0 16 "16" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_enable_all _null_ _null_ _null_ ));
++DATA(insert OID = 6018 ( pg_msgmodule_disable PGNSP PGUID 12 1 1 0 0 f f f f t t i s 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_disable _null_ _null_ _null_ ));
+DESCR("pid to ignore overriden log levels");
- DATA(insert OID = 6020 ( pg_msgmodule_disable_all PGNSP PGUID 12 1 1 0 0 f f f f t t 0 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_disable_all _null_ _null_ _null_ ));
++DATA(insert OID = 6019 ( pg_msgmodule_enable_all PGNSP PGUID 12 1 1 0 0 f f f f t t i s 1 0 16 "16" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_enable_all _null_ _null_ _null_ ));
+DESCR("all current/future processes to honour overriden log levels");
++DATA(insert OID = 6020 ( pg_msgmodule_disable_all PGNSP PGUID 12 1 1 0 0 f f f f t t 0 s 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_disable_all _null_ _null_ _null_ ));
+DESCR("all processes to ignore overriden log levels");
+#endif
+
+ /* rls */
+ DATA(insert OID = 3298 ( row_security_active PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ row_security_active _null_ _null_ _null_ ));
+ DESCR("row security for current context active on table by table oid");
+ DATA(insert OID = 3299 ( row_security_active PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "25" _null_ _null_ _null_ _null_ _null_ row_security_active_name _null_ _null_ _null_ ));
+ DESCR("row security for current context active on table by table name");
+
+ /* pg_config */
+ DATA(insert OID = 3400 ( pg_config PGNSP PGUID 12 1 23 0 0 f f f f t t i r 0 0 2249 "" "{25,25}" "{o,o}" "{name,setting}" _null_ _null_ pg_config _null_ _null_ _null_ ));
+ DESCR("pg_config binary as a function");
+
+ /* pg_controldata related functions */
+ DATA(insert OID = 3441 ( pg_control_system PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{23,23,20,1184}" "{o,o,o,o}" "{pg_control_version,catalog_version_no,system_identifier,pg_control_last_modified}" _null_ _null_ pg_control_system _null_ _null_ _null_ ));
+ DESCR("pg_controldata general state information as a function");
+
+ DATA(insert OID = 3442 ( pg_control_checkpoint PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{3220,3220,3220,25,23,23,16,25,26,28,28,28,26,28,28,26,28,28,1184}" "{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{checkpoint_location,prior_location,redo_location,redo_wal_file,timeline_id,prev_timeline_id,full_page_writes,next_xid,next_oid,next_multixact_id,next_multi_offset,oldest_xid,oldest_xid_dbid,oldest_active_xid,oldest_multi_xid,oldest_multi_dbid,oldest_commit_ts_xid,newest_commit_ts_xid,checkpoint_time}" _null_ _null_ pg_control_checkpoint _null_ _null_ _null_ ));
+ DESCR("pg_controldata checkpoint state information as a function");
+
+ DATA(insert OID = 3443 ( pg_control_recovery PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{3220,23,3220,3220,16}" "{o,o,o,o,o}" "{min_recovery_end_location,min_recovery_end_timeline,backup_start_location,backup_end_location,end_of_backup_record_required}" _null_ _null_ pg_control_recovery _null_ _null_ _null_ ));
+ DESCR("pg_controldata recovery state information as a function");
+
+ DATA(insert OID = 3444 ( pg_control_init PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{23,23,23,23,23,23,23,23,23,16,16,16,23}" "{o,o,o,o,o,o,o,o,o,o,o,o,o}" "{max_data_alignment,database_block_size,blocks_per_segment,wal_block_size,bytes_per_wal_segment,max_identifier_length,max_index_columns,max_toast_chunk_size,large_object_chunk_size,bigint_timestamps,float4_pass_by_value,float8_pass_by_value,data_page_checksum_version}" _null_ _null_ pg_control_init _null_ _null_ _null_ ));
+ DESCR("pg_controldata init state information as a function");
+
/*
* Symbolic values for provolatile column: these indicate whether the result
* of a function is dependent *only* on the values of its explicit arguments,
* along with the relation's initial contents.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/catalog/pg_type.h
* sequence.h
* prototypes for sequence.c.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/commands/sequence.h
* header file for postgres vacuum cleaner and statistics analyzer
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/commands/vacuum.h
* variable.h
* Routines for handling specialized SET variables.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/commands/variable.h
* and related modules.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/executor/execdesc.h
* support for the POSTGRES executor module
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/executor/executor.h
* tuple table support stuff
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/executor/tuptable.h
* Over time, this has also become the preferred place for widely known
* resource-limitation stuff, such as work_mem and check_stack_depth().
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/miscadmin.h
* definitions for executor state nodes
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/nodes/execnodes.h
* Definitions for tagged nodes.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/nodes/nodes.h
*
/*
* nodes/{outfuncs.c,print.c}
*/
+#ifdef XCP
+extern void set_portable_output(bool value);
+#endif
extern char *nodeToString(const void *obj);
+ struct Bitmapset; /* not to include bitmapset.h here */
+ struct StringInfoData; /* not to include stringinfo.h here */
+ extern void outNode(struct StringInfoData *str, const void *obj);
+ extern void outToken(struct StringInfoData *str, const char *s);
+ extern void outBitmapset(struct StringInfoData *str,
+ const struct Bitmapset *bms);
+ extern void outDatum(struct StringInfoData *str, uintptr_t value,
+ int typlen, bool typbyval);
+
/*
* nodes/{readfuncs.c,read.c}
*/
+#ifdef XCP
+extern void set_portable_input(bool value);
+#endif
extern void *stringToNode(char *str);
+ extern struct Bitmapset *readBitmapset(void);
+ extern uintptr_t readDatum(bool typbyval);
+ extern bool *readBoolCols(int numCols);
+ extern int *readIntCols(int numCols);
+ extern Oid *readOidCols(int numCols);
+ extern int16 *readAttrNumberCols(int numCols);
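/*
 * Sketch of the intended pairing (assumed usage): the sender serializes a
 * node tree in "portable" form before shipping it to another node, which
 * symmetrically enables portable input before deserializing:
 *
 *     set_portable_output(true);
 *     char *buf = nodeToString(plan);               (coordinator side)
 *     ...
 *     set_portable_input(true);
 *     Plan *remoteplan = (Plan *) stringToNode(buf);   (datanode side)
 */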
/*
* nodes/copyfuncs.c
* Support for finding the values associated with Param nodes.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/nodes/params.h
* the location.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/nodes/parsenodes.h
*
* definitions for query plan nodes
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/nodes/plannodes.h
List *invalItems; /* other dependencies, as PlanInvalItems */
int nParamExec; /* number of PARAM_EXEC Params used */
-
- bool hasRowSecurity; /* row security applied? */
+#ifdef XCP
+ int nParamRemote; /* number of params sent from the master node */
+
+ struct RemoteParam *remoteparams; /* parameter descriptors */
+
+ const char *pname; /* the portal name */
+
+ /* Parameters to filter out result rows */
+ char distributionType;
+ AttrNumber distributionKey;
+ List *distributionNodes;
+ List *distributionRestrict;
+#endif
} PlannedStmt;
/* macro for fetching the Plan associated with a SubPlan node */
Oid *grpOperators; /* equality operators to compare with */
} Group;
++#ifdef XCP
++typedef enum AggDistribution
++{
++ AGG_ONENODE, /* not distributed aggregation */
++ AGG_SLAVE, /* execute only the transition function */
++ AGG_MASTER /* execute the collection function as the
++ * transition step, then the final function */
++} AggDistribution;
++#endif
++
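/*
 * Illustration (assumed plan shape): under XCP a two-step aggregate such as
 *
 *     SELECT count(*) FROM t;
 *
 * runs an AGG_SLAVE Agg on every datanode (transition work only, yielding
 * per-node partial counts) and an AGG_MASTER Agg on the coordinator, which
 * applies the collection function and then the final function to combine
 * them.
 */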
/* ---------------
* aggregate node
*
typedef struct Agg
{
Plan plan;
- AggStrategy aggstrategy;
+ AggStrategy aggstrategy; /* basic strategy, see nodes.h */
+#ifdef XCP
+ AggDistribution aggdistribution;
+#endif
+ AggSplit aggsplit; /* agg-splitting mode, see nodes.h */
int numCols; /* number of grouping columns */
AttrNumber *grpColIdx; /* their indexes in the target list */
Oid *grpOperators; /* equality operators to compare with */
* and join trees.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/nodes/primnodes.h
*
Oid aggtype; /* type Oid of result of the aggregate */
Oid aggcollid; /* OID of collation of result */
Oid inputcollid; /* OID of collation that function should use */
+#ifdef PGXC
+ Oid aggtrantype; /* type Oid of transition results */
+ bool agghas_collectfn; /* is collection function available */
+#endif /* PGXC */
+ Oid aggtranstype; /* type Oid of aggregate's transition value */
+ List *aggargtypes; /* type Oids of direct and aggregated args */
List *aggdirectargs; /* direct arguments, if an ordered-set agg */
List *args; /* aggregated arguments and sort expressions */
List *aggorder; /* ORDER BY (list of SortGroupClause) */
* Definitions for planner's internal data structures.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/nodes/relation.h
/* optional private data for join_search_hook, e.g., GEQO */
void *join_search_private;
-
- /* for GroupingFunc fixup in setrefs */
- AttrNumber *grouping_map;
+#ifdef XCP
+ /*
+ * This is NULL for a SELECT query (NULL distribution means "Coordinator"
+ * everywhere in the planner). For INSERT, UPDATE or DELETE it should match
+ * the target table distribution.
+ */
+ Distribution *distribution; /* Query result distribution */
+ bool recursiveOk;
+#endif
} PlannerInfo;
List *uniq_exprs; /* expressions to be made unique */
} UniquePath;
+#ifdef XCP
+typedef struct RemoteSubPath
+{
+ Path path;
+ Path *subpath;
+} RemoteSubPath;
+#endif
+
+ /*
+ * GatherPath runs several copies of a plan in parallel and collects the
+ * results. The parallel leader may also execute the plan, unless the
+ * single_copy flag is set.
+ */
+ typedef struct GatherPath
+ {
+ Path path;
+ Path *subpath; /* path for each worker */
+ bool single_copy; /* path must not be executed >1x */
+ } GatherPath;
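/*
 * e.g. a parallel sequential scan is planned as a GatherPath whose subpath
 * is the partial SeqScan each worker runs; with single_copy = true exactly
 * one worker executes the subpath and the leader only collects the rows.
 */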
+
/*
* All join-type paths share these fields.
*/
int num_batches; /* number of batches expected */
} HashPath;
+#ifdef PGXC
+/*
+ * A RemoteQuery path represents the queries to be sent to the datanode(s).
+ *
+ * When RemoteQuery plan is created from RemoteQueryPath, we build the query to
+ * be executed at the datanode. For building such a query, it's important to get
+ * the RHS relation and LHS relation of the JOIN clause. So, instead of storing
+ * the outer and inner paths, we find out the RHS and LHS paths and store those
+ * here.
+ */
+
+typedef struct RemoteQueryPath
+{
+ Path path;
+ ExecNodes *rqpath_en; /* List of datanodes to execute the query on */
+
+ /*
+ * If the path represents a JOIN rel, leftpath and rightpath represent the
+ * RemoteQuery paths for left (outer) and right (inner) side of the JOIN
+ * resp. jointype and join_restrictlist pertains to such JOINs.
+ */
+ struct RemoteQueryPath *leftpath;
+ struct RemoteQueryPath *rightpath;
+ JoinType jointype;
+ List *join_restrictlist; /* restrict list corresponding to JOINs,
+ * only considered if rest of
+ * the JOIN information is
+ * available
+ */
+ bool has_unshippable_qual; /* TRUE if there is at least
+ * one qual which cannot be
+ * shipped to the datanodes
+ */
+ bool has_temp_rel; /* TRUE if one of the base relations
+ * involved in this path is a temporary
+ * table.
+ */
+ bool has_unshippable_tlist; /* TRUE if there is at least one
+ * targetlist entry which is
+ * not completely shippable.
+ */
+} RemoteQueryPath;
+#endif /* PGXC */
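/*
 * Example (illustrative): for
 *
 *     SELECT * FROM t1 JOIN t2 ON t1.k = t2.k
 *
 * with t1 and t2 distributed on k, leftpath and rightpath let the planner
 * rebuild the join as a single SQL statement and ship it whole to the
 * datanodes, instead of joining the two remote results on the coordinator.
 */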
+
+ /*
+ * ProjectionPath represents a projection (that is, targetlist computation)
+ *
+ * Nominally, this path node represents using a Result plan node to do a
+ * projection step. However, if the input plan node supports projection,
+ * we can just modify its output targetlist to do the required calculations
+ * directly, and not need a Result. In some places in the planner we can just
+ * jam the desired PathTarget into the input path node (and adjust its cost
+ * accordingly), so we don't need a ProjectionPath. But in other places
+ * it's necessary to not modify the input path node, so we need a separate
+ * ProjectionPath node, which is marked dummy to indicate that we intend to
+ * assign the work to the input plan node. The estimated cost for the
+ * ProjectionPath node will account for whether a Result will be used or not.
+ */
+ typedef struct ProjectionPath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ bool dummypp; /* true if no separate Result is needed */
+ } ProjectionPath;
+
+ /*
+ * SortPath represents an explicit sort step
+ *
+ * The sort keys are, by definition, the same as path.pathkeys.
+ *
+ * Note: the Sort plan node cannot project, so path.pathtarget must be the
+ * same as the input's pathtarget.
+ */
+ typedef struct SortPath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ } SortPath;
+
+ /*
+ * GroupPath represents grouping (of presorted input)
+ *
+ * groupClause represents the columns to be grouped on; the input path
+ * must be at least that well sorted.
+ *
+ * We can also apply a qual to the grouped rows (equivalent of HAVING)
+ */
+ typedef struct GroupPath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ List *groupClause; /* a list of SortGroupClause's */
+ List *qual; /* quals (HAVING quals), if any */
+ } GroupPath;
+
+ /*
+ * UpperUniquePath represents adjacent-duplicate removal (in presorted input)
+ *
+ * The columns to be compared are the first numkeys columns of the path's
+ * pathkeys. The input is presumed already sorted that way.
+ */
+ typedef struct UpperUniquePath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ int numkeys; /* number of pathkey columns to compare */
+ } UpperUniquePath;
+
+ /*
+ * AggPath represents generic computation of aggregate functions
+ *
+ * This may involve plain grouping (but not grouping sets), using either
+ * sorted or hashed grouping; for the AGG_SORTED case, the input must be
+ * appropriately presorted.
+ */
+ typedef struct AggPath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ AggStrategy aggstrategy; /* basic strategy, see nodes.h */
+ AggSplit aggsplit; /* agg-splitting mode, see nodes.h */
+ double numGroups; /* estimated number of groups in input */
+ List *groupClause; /* a list of SortGroupClause's */
+ List *qual; /* quals (HAVING quals), if any */
+ } AggPath;
+
+ /*
+ * GroupingSetsPath represents a GROUPING SETS aggregation
+ *
+ * Currently we only support this in sorted not hashed form, so the input
+ * must always be appropriately presorted.
+ */
+ typedef struct GroupingSetsPath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ List *rollup_groupclauses; /* list of lists of SortGroupClause's */
+ List *rollup_lists; /* parallel list of lists of grouping sets */
+ List *qual; /* quals (HAVING quals), if any */
+ } GroupingSetsPath;
+
+ /*
+ * MinMaxAggPath represents computation of MIN/MAX aggregates from indexes
+ */
+ typedef struct MinMaxAggPath
+ {
+ Path path;
+ List *mmaggregates; /* list of MinMaxAggInfo */
+ List *quals; /* HAVING quals, if any */
+ } MinMaxAggPath;
+
+ /*
+ * WindowAggPath represents generic computation of window functions
+ *
+ * Note: winpathkeys is separate from path.pathkeys because the actual sort
+ * order might be an extension of winpathkeys; but createplan.c needs to
+ * know exactly how many pathkeys match the window clause.
+ */
+ typedef struct WindowAggPath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ WindowClause *winclause; /* WindowClause we'll be using */
+ List *winpathkeys; /* PathKeys for PARTITION keys + ORDER keys */
+ } WindowAggPath;
+
+ /*
+ * SetOpPath represents a set-operation, that is INTERSECT or EXCEPT
+ */
+ typedef struct SetOpPath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ SetOpCmd cmd; /* what to do, see nodes.h */
+ SetOpStrategy strategy; /* how to do it, see nodes.h */
+ List *distinctList; /* SortGroupClauses identifying target cols */
+ AttrNumber flagColIdx; /* where is the flag column, if any */
+ int firstFlag; /* flag value for first input relation */
+ double numGroups; /* estimated number of groups in input */
+ } SetOpPath;
+
+ /*
+ * RecursiveUnionPath represents a recursive UNION node
+ */
+ typedef struct RecursiveUnionPath
+ {
+ Path path;
+ Path *leftpath; /* paths representing input sources */
+ Path *rightpath;
+ List *distinctList; /* SortGroupClauses identifying target cols */
+ int wtParam; /* ID of Param representing work table */
+ double numGroups; /* estimated number of groups in input */
+ } RecursiveUnionPath;
+
+ /*
+ * LockRowsPath represents acquiring row locks for SELECT FOR UPDATE/SHARE
+ */
+ typedef struct LockRowsPath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ List *rowMarks; /* a list of PlanRowMark's */
+ int epqParam; /* ID of Param for EvalPlanQual re-eval */
+ } LockRowsPath;
+
+ /*
+ * ModifyTablePath represents performing INSERT/UPDATE/DELETE modifications
+ *
+ * We represent most things that will be in the ModifyTable plan node
+ * literally, except we have child Path(s) not Plan(s). But analysis of the
+ * OnConflictExpr is deferred to createplan.c, as is collection of FDW data.
+ */
+ typedef struct ModifyTablePath
+ {
+ Path path;
+ CmdType operation; /* INSERT, UPDATE, or DELETE */
+ bool canSetTag; /* do we set the command tag/es_processed? */
+ Index nominalRelation; /* Parent RT index for use of EXPLAIN */
+ List *resultRelations; /* integer list of RT indexes */
+ List *subpaths; /* Path(s) producing source data */
+ List *subroots; /* per-target-table PlannerInfos */
+ List *withCheckOptionLists; /* per-target-table WCO lists */
+ List *returningLists; /* per-target-table RETURNING tlists */
+ List *rowMarks; /* PlanRowMarks (non-locking only) */
+ OnConflictExpr *onconflict; /* ON CONFLICT clause, or NULL */
+ int epqParam; /* ID of Param for EvalPlanQual re-eval */
+ } ModifyTablePath;
+
+ /*
+ * LimitPath represents applying LIMIT/OFFSET restrictions
+ */
+ typedef struct LimitPath
+ {
+ Path path;
+ Path *subpath; /* path representing input source */
+ Node *limitOffset; /* OFFSET parameter, or NULL if none */
+ Node *limitCount; /* COUNT parameter, or NULL if none */
+ } LimitPath;
+
+
/*
* Restriction clause info.
*
* prototypes for costsize.c and clausesel.c.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/optimizer/cost.h
#define DEFAULT_CPU_TUPLE_COST 0.01
#define DEFAULT_CPU_INDEX_TUPLE_COST 0.005
#define DEFAULT_CPU_OPERATOR_COST 0.0025
+#ifdef XCP
+#define DEFAULT_NETWORK_BYTE_COST 0.001
+#define DEFAULT_REMOTE_QUERY_COST 100.0
+#endif
+ #define DEFAULT_PARALLEL_TUPLE_COST 0.1
+ #define DEFAULT_PARALLEL_SETUP_COST 1000.0
#define DEFAULT_EFFECTIVE_CACHE_SIZE 524288 /* measured in pages */
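/*
 * Back-of-envelope (assumed costing model, not taken from this patch):
 * shipping 1,000 rows of 100 bytes from a datanode would contribute about
 *
 *     1000 * 100 * DEFAULT_NETWORK_BYTE_COST = 100.0
 *
 * to the path cost, plus DEFAULT_REMOTE_QUERY_COST once per remote query.
 */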
extern PGDLLIMPORT double cpu_tuple_cost;
extern PGDLLIMPORT double cpu_index_tuple_cost;
extern PGDLLIMPORT double cpu_operator_cost;
+#ifdef XCP
+extern PGDLLIMPORT double network_byte_cost;
+extern PGDLLIMPORT double remote_query_cost;
+#endif
+ extern PGDLLIMPORT double parallel_tuple_cost;
+ extern PGDLLIMPORT double parallel_setup_cost;
extern PGDLLIMPORT int effective_cache_size;
extern Cost disable_cost;
+ extern int max_parallel_workers_per_gather;
extern bool enable_seqscan;
extern bool enable_indexscan;
extern bool enable_indexonlyscan;
RelOptInfo *baserel, ParamPathInfo *param_info);
extern void cost_valuesscan(Path *path, PlannerInfo *root,
RelOptInfo *baserel, ParamPathInfo *param_info);
+#ifdef PGXC
+extern void cost_remotequery(Path *path, PlannerInfo *root, RelOptInfo *baserel);
+#endif
extern void cost_ctescan(Path *path, PlannerInfo *root,
RelOptInfo *baserel, ParamPathInfo *param_info);
- extern void cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm);
+ extern void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm);
extern void cost_sort(Path *path, PlannerInfo *root,
List *pathkeys, Cost input_cost, double tuples, int width,
Cost comparison_cost, int sort_mem,
* prototypes for pathnode.c, relnode.c.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/optimizer/pathnode.h
extern MaterialPath *create_material_path(RelOptInfo *rel, Path *subpath);
extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel,
Path *subpath, SpecialJoinInfo *sjinfo);
- extern Path *create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
- List *pathkeys, Relids required_outer,
+ extern GatherPath *create_gather_path(PlannerInfo *root,
+ RelOptInfo *rel, Path *subpath, PathTarget *target,
+ Relids required_outer, double *rows);
+#ifdef XCP
- extern Path *create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
++extern SubqueryScanPath *create_subqueryscan_path(PlannerInfo *root,
++ RelOptInfo *rel, Path *subpath, List *pathkeys, Relids required_outer,
+ Distribution *distribution);
+#else
+ extern SubqueryScanPath *create_subqueryscan_path(PlannerInfo *root,
+ RelOptInfo *rel, Path *subpath,
List *pathkeys, Relids required_outer);
+#endif
extern Path *create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
List *pathkeys, Relids required_outer);
extern Path *create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
* prototypes for various files in optimizer/plan
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/optimizer/planmain.h
#include "nodes/plannodes.h"
#include "nodes/relation.h"
+#ifdef XCP
+#include "pgxc/planner.h"
+#endif
+ /* possible values for force_parallel_mode */
+ typedef enum
+ {
+ FORCE_PARALLEL_OFF,
+ FORCE_PARALLEL_ON,
+ FORCE_PARALLEL_REGRESS
+ } ForceParallelMode;
+
/* GUC parameters */
#define DEFAULT_CURSOR_TUPLE_FRACTION 0.1
extern double cursor_tuple_fraction;
extern RowMarkType select_rowmark_type(RangeTblEntry *rte,
LockClauseStrength strength);
+ extern void mark_partial_aggref(Aggref *agg, AggSplit aggsplit);
+
+ extern Path *get_cheapest_fractional_path(RelOptInfo *rel,
+ double tuple_fraction);
+
extern Expr *expression_planner(Expr *expr);
+#ifdef PGXC
+extern void GetHashExecNodes(RelationLocInfo *rel_loc_info,
+ ExecNodes **exec_nodes, const Expr *expr);
+#endif
extern Expr *preprocess_phv_expression(PlannerInfo *root, Expr *expr);
* parse analysis for optimizable statements
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/parser/analyze.h
* by the PG_KEYWORD macro, which is not defined in this file; it can
* be defined by the caller for special purposes.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* IDENTIFICATION
* src/include/parser/kwlist.h
PG_KEYWORD("delete", DELETE_P, UNRESERVED_KEYWORD)
PG_KEYWORD("delimiter", DELIMITER, UNRESERVED_KEYWORD)
PG_KEYWORD("delimiters", DELIMITERS, UNRESERVED_KEYWORD)
+ PG_KEYWORD("depends", DEPENDS, UNRESERVED_KEYWORD)
PG_KEYWORD("desc", DESC, RESERVED_KEYWORD)
PG_KEYWORD("dictionary", DICTIONARY, UNRESERVED_KEYWORD)
+PG_KEYWORD("direct", DIRECT, UNRESERVED_KEYWORD)
PG_KEYWORD("disable", DISABLE_P, UNRESERVED_KEYWORD)
PG_KEYWORD("discard", DISCARD, UNRESERVED_KEYWORD)
PG_KEYWORD("distinct", DISTINCT, RESERVED_KEYWORD)
* parse_agg.h
* handle aggregates and window functions in parser
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/parser/parse_agg.h
Oid *inputTypes,
int numArguments);
- extern void build_aggregate_fnexprs(Oid *agg_input_types,
- int agg_num_inputs,
- int agg_num_direct_inputs,
- int num_finalfn_inputs,
- bool agg_variadic,
- Oid agg_state_type,
+ extern void build_aggregate_transfn_expr(Oid *agg_input_types,
+ int agg_num_inputs,
+ int agg_num_direct_inputs,
+ bool agg_variadic,
+ Oid agg_state_type,
+#ifdef XCP
+ Oid agg_collect_type,
+#endif
- Oid agg_result_type,
- Oid agg_input_collation,
- Oid transfn_oid,
+ Oid agg_input_collation,
+ Oid transfn_oid,
+#ifdef XCP
+ Oid collectfn_oid,
+#endif
- Oid invtransfn_oid,
- Oid finalfn_oid,
- Expr **transfnexpr,
- Expr **invtransfnexpr,
+ Oid invtransfn_oid,
+ Expr **transfnexpr,
+#ifdef XCP
+ Expr **collectfnexpr,
+#endif
- Expr **finalfnexpr);
+ Expr **invtransfnexpr);
+
+ extern void build_aggregate_combinefn_expr(Oid agg_state_type,
+ Oid agg_input_collation,
+ Oid combinefn_oid,
+ Expr **combinefnexpr);
+
+ extern void build_aggregate_serialfn_expr(Oid serialfn_oid,
+ Expr **serialfnexpr);
+
+ extern void build_aggregate_deserialfn_expr(Oid deserialfn_oid,
+ Expr **deserialfnexpr);
+
+ extern void build_aggregate_finalfn_expr(Oid *agg_input_types,
+ int num_finalfn_inputs,
+ Oid agg_state_type,
+ Oid agg_result_type,
+ Oid agg_input_collation,
+ Oid finalfn_oid,
+ Expr **finalfnexpr);
#endif /* PARSE_AGG_H */
* parse analysis for utility commands
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/parser/parse_utilcmd.h
*
#define MEMSET_LOOP_LIMIT 1024
/* Define to the address where bug reports for this package should be sent. */
/* Define to the full name of this package. */
-#define PACKAGE_NAME "PostgreSQL"
+#define PACKAGE_NAME "Postgres-XL"
/* Define to the full name and version of this package. */
- #define PACKAGE_STRING "Postgres-XL 9.5alpha1"
-#define PACKAGE_STRING "PostgreSQL 9.6beta4"
++#define PACKAGE_STRING "Postgres-XL 9.6alpha1"
/* Define to the version of this package. */
- #define PACKAGE_VERSION "9.5alpha1"
-#define PACKAGE_VERSION "9.6beta4"
++#define PACKAGE_VERSION "9.6alpha1"
/* Define to the name of a signed 128-bit integer type. */
#undef PG_INT128_TYPE
#define PG_INT64_TYPE long long int
/* PostgreSQL version as a string */
- #define PG_VERSION "9.5alpha1"
+ #define PG_VERSION "9.6beta4"
/* PostgreSQL version as a number */
- #define PG_VERSION_NUM 90500
+ #define PG_VERSION_NUM 90600
/* Define to the one symbol short name of this package. */
-#define PACKAGE_TARNAME "postgresql"
+#define PACKAGE_TARNAME "postgres-xl"
+
+/* Postgres-XC version as a string */
+#define PGXC_VERSION "1.1devel"
+
+/* Postgres-XC version as a number */
+#define PGXC_VERSION_NUM 10100
/* Define to the name of the default PostgreSQL service principal in Kerberos.
(--with-krb-srvnam=NAME) */
*
* Definitions for the PostgreSQL statistics collector daemon.
*
- * Copyright (c) 2001-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Copyright (c) 2001-2016, PostgreSQL Global Development Group
*
* src/include/pgstat.h
* ----------
* Client-side code should include postgres_fe.h instead.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1995, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/postgres.h
*
* header file for integrated autovacuum daemon
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/postmaster/autovacuum.h
*
* POSTGRES backend id communication definitions
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/storage/backendid.h
extern PGDLLIMPORT BackendId MyBackendId; /* backend id of this backend */
+#ifdef XCP
+/*
+ * The next two variables make up the distributed session id. The actual
+ * distributed session id is a string that includes the coordinator node
+ * name, but an Oid is cheaper to store and compare against the distributed
+ * session ids of other backends under the same postmaster.
+ */
+extern PGDLLIMPORT Oid MyCoordId;
+extern PGDLLIMPORT char MyCoordName[NAMEDATALEN];
+
+extern PGDLLIMPORT int MyCoordPid;
+extern PGDLLIMPORT LocalTransactionId MyCoordLxid;
+
+/* BackendId of the first backend of the distributed session on the node */
+extern PGDLLIMPORT BackendId MyFirstBackendId;
+#endif
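/*
 * A minimal sketch of how the XCP fields above can be compared, assuming
 * only the declarations in this header; sameDistributedSession() is a
 * hypothetical helper for illustration, not something this patch defines.
 */
#ifdef XCP
static inline bool
sameDistributedSession(Oid otherCoordId, LocalTransactionId otherCoordLxid)
{
	/* compare the cheap Oid/lxid pair instead of rebuilding the string id */
	return OidIsValid(MyCoordId) &&
		MyCoordId == otherCoordId &&
		MyCoordLxid == otherCoordLxid;
}
#endif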
+ /* backend id of our parallel session leader, or InvalidBackendId if none */
+ extern PGDLLIMPORT BackendId ParallelMasterBackendId;
+
+ /*
+ * The BackendId to use for our session's temp relations is normally our own,
+ * but parallel workers should use their leader's ID.
+ */
+ #define BackendIdForTempRelations() \
+ (ParallelMasterBackendId == InvalidBackendId ? MyBackendId : ParallelMasterBackendId)
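/*
 * Illustrative use of the macro above (a sketch, not part of the patch):
 * the session's temp-table namespace is conventionally derived from this
 * backend id, so a parallel worker resolves its leader's namespace:
 *
 *		char	namebuf[64];
 *
 *		snprintf(namebuf, sizeof(namebuf), "pg_temp_%d",
 *				 BackendIdForTempRelations());
 */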
#endif /* BACKENDID_H */
* Lightweight lock manager
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/storage/lwlock.h
* per-process shared memory data structures
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/storage/proc.h
* POSTGRES process array definitions.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/storage/procarray.h
*
* Routines for interprocess signalling
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/storage/procsignal.h
* storage manager switch public interface declarations.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/storage/smgr.h
* calls in portal and cursor manipulations.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/tcop/dest.h
DestTuplestore, /* results sent to Tuplestore */
DestIntoRel, /* results sent to relation (SELECT INTO) */
DestCopyOut, /* results sent to COPY TO code */
- DestSQLFunction, /* results sent to SQL-language func mgr */
+ DestSQLFunction, /* results sent to SQL-language func mgr */
+#ifdef XCP
+ DestProducer, /* results sent to a SharedQueue */
+#endif
- DestTransientRel /* results sent to transient relation */
+ DestTransientRel, /* results sent to transient relation */
+ DestTupleQueue /* results sent to tuple queue */
} CommandDest;
/* ----------------
* prototypes for pquery.c.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/tcop/pquery.h
* Declarations for operations on built-in types.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/utils/builtins.h
#define BUILTINS_H
#include "fmgr.h"
+#include "lib/stringinfo.h"
#include "nodes/parsenodes.h"
-
+ #include "utils/sortsupport.h"
/*
* Defined in adt/
*/
extern Datum radians(PG_FUNCTION_ARGS);
extern Datum drandom(PG_FUNCTION_ARGS);
extern Datum setseed(PG_FUNCTION_ARGS);
+ extern Datum float8_combine(PG_FUNCTION_ARGS);
extern Datum float8_accum(PG_FUNCTION_ARGS);
extern Datum float4_accum(PG_FUNCTION_ARGS);
+#ifdef PGXC
+extern Datum float8_collect(PG_FUNCTION_ARGS);
+#endif
extern Datum float8_avg(PG_FUNCTION_ARGS);
extern Datum float8_var_pop(PG_FUNCTION_ARGS);
extern Datum float8_var_samp(PG_FUNCTION_ARGS);
extern Datum float8_stddev_pop(PG_FUNCTION_ARGS);
extern Datum float8_stddev_samp(PG_FUNCTION_ARGS);
extern Datum float8_regr_accum(PG_FUNCTION_ARGS);
+#ifdef PGXC
+extern Datum float8_regr_collect(PG_FUNCTION_ARGS);
+#endif
+ extern Datum float8_regr_combine(PG_FUNCTION_ARGS);
extern Datum float8_regr_sxx(PG_FUNCTION_ARGS);
extern Datum float8_regr_syy(PG_FUNCTION_ARGS);
extern Datum float8_regr_sxy(PG_FUNCTION_ARGS);
extern Datum int2_accum(PG_FUNCTION_ARGS);
extern Datum int4_accum(PG_FUNCTION_ARGS);
extern Datum int8_accum(PG_FUNCTION_ARGS);
+#ifdef PGXC
+extern Datum numeric_collect(PG_FUNCTION_ARGS);
+extern Datum numeric_poly_collect(PG_FUNCTION_ARGS);
+#endif
+ extern Datum numeric_poly_combine(PG_FUNCTION_ARGS);
+ extern Datum numeric_poly_serialize(PG_FUNCTION_ARGS);
+ extern Datum numeric_poly_deserialize(PG_FUNCTION_ARGS);
extern Datum int2_accum_inv(PG_FUNCTION_ARGS);
extern Datum int4_accum_inv(PG_FUNCTION_ARGS);
extern Datum int8_accum_inv(PG_FUNCTION_ARGS);
extern Datum int8_avg_accum(PG_FUNCTION_ARGS);
+#ifdef PGXC
+extern Datum numeric_avg_collect(PG_FUNCTION_ARGS);
+#endif
+ extern Datum int8_avg_combine(PG_FUNCTION_ARGS);
+ extern Datum int8_avg_serialize(PG_FUNCTION_ARGS);
+ extern Datum int8_avg_deserialize(PG_FUNCTION_ARGS);
extern Datum numeric_avg(PG_FUNCTION_ARGS);
extern Datum numeric_sum(PG_FUNCTION_ARGS);
extern Datum numeric_var_pop(PG_FUNCTION_ARGS);
extern Datum int2_sum(PG_FUNCTION_ARGS);
extern Datum int4_sum(PG_FUNCTION_ARGS);
extern Datum int8_sum(PG_FUNCTION_ARGS);
+#ifdef PGXC
+extern Datum int8_sum_to_int8(PG_FUNCTION_ARGS);
+#endif
extern Datum int2_avg_accum(PG_FUNCTION_ARGS);
extern Datum int4_avg_accum(PG_FUNCTION_ARGS);
+#ifdef PGXC
+extern Datum int8_avg_collect(PG_FUNCTION_ARGS);
+#endif
+ extern Datum int4_avg_combine(PG_FUNCTION_ARGS);
extern Datum int2_avg_accum_inv(PG_FUNCTION_ARGS);
extern Datum int4_avg_accum_inv(PG_FUNCTION_ARGS);
extern Datum int8_avg_accum_inv(PG_FUNCTION_ARGS);
* External declarations pertaining to backend/utils/misc/guc.c and
* backend/utils/misc/guc-file.l
*
- * Copyright (c) 2000-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Copyright (c) 2000-2016, PostgreSQL Global Development Group
*
* src/include/utils/guc.h
* lsyscache.h
* Convenience routines for common queries in the system catalog cache.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/utils/lsyscache.h
*
* See plancache.c for comments.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/utils/plancache.h
double generic_cost; /* cost of generic plan, or -1 if not known */
double total_custom_cost; /* total cost of custom plans so far */
int num_custom_plans; /* number of plans included in total */
- bool hasRowSecurity; /* planned with row security? */
- bool row_security_env; /* row security setting when planned */
+#ifdef PGXC
+ char *stmt_name; /* if set, a copy of the prepared statement name */
+#endif
} CachedPlanSource;
/*
* to look like NO SCROLL cursors.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/utils/portal.h
*/
Tuplestorestate *holdStore; /* store for holdable cursors */
MemoryContext holdContext; /* memory containing holdStore */
+#ifdef XCP
+ MemoryContext tmpContext; /* temporary memory */
+#endif
+ /*
+ * Snapshot under which tuples in the holdStore were read. We must keep a
+ * reference to this snapshot if there is any possibility that the tuples
+ * contain TOAST references, because releasing the snapshot could allow
+ * recently-dead rows to be vacuumed away, along with any toast data
+ * belonging to them. In the case of a held cursor, we avoid needing to
+ * keep such a snapshot by forcibly detoasting the data.
+ */
+ Snapshot holdSnapshot; /* registered snapshot, or NULL if none */
+
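	/*
	 * A sketch of holdSnapshot's lifecycle, assuming the snapmgr
	 * resource-owner API (illustration only, not code from this patch):
	 *
	 *		portal->holdSnapshot =
	 *			RegisterSnapshotOnOwner(snapshot, portal->resowner);
	 *		...
	 *		UnregisterSnapshotFromOwner(portal->holdSnapshot,
	 *									portal->resowner);
	 */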
/*
* atStart, atEnd and portalPos indicate the current cursor position.
* portalPos is zero before the first row, N after fetching N'th row of
* POSTGRES relation descriptor (a/k/a relcache entry) definitions.
*
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/utils/rel.h
*
/* use "struct" here to avoid needing to include pgstat.h: */
struct PgStat_TableStatus *pgstat_info; /* statistics collection area */
+#ifdef PGXC
+ RelationLocInfo *rd_locator_info;
+#endif
} RelationData;
+
+ /*
+ * ForeignKeyCacheInfo
+ * Information the relcache can cache about foreign key constraints
+ *
+ * This is basically just an image of relevant columns from pg_constraint.
+ * We make it a subclass of Node so that copyObject() can be used on a list
+ * of these, but we also ensure it is a "flat" object without substructure,
+ * so that list_free_deep() is sufficient to free such a list.
+ * The per-FK-column arrays can be fixed-size because we allow at most
+ * INDEX_MAX_KEYS columns in a foreign key constraint.
+ *
+ * Currently, we only cache fields of interest to the planner, but the
+ * set of fields could be expanded in future.
+ */
+ typedef struct ForeignKeyCacheInfo
+ {
+ NodeTag type;
+ Oid conrelid; /* relation constrained by the foreign key */
+ Oid confrelid; /* relation referenced by the foreign key */
+ int nkeys; /* number of columns in the foreign key */
+ /* these arrays each have nkeys valid entries: */
+ AttrNumber conkey[INDEX_MAX_KEYS]; /* cols in referencing table */
+ AttrNumber confkey[INDEX_MAX_KEYS]; /* cols in referenced table */
+ Oid conpfeqop[INDEX_MAX_KEYS]; /* PK = FK operator OIDs */
+ } ForeignKeyCacheInfo;
+
+
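/*
 * A sketch of what the "flat Node" design above buys (illustration only;
 * "fkeys" stands for any List of ForeignKeyCacheInfo entries): copyObject()
 * duplicates the whole list, and list_free_deep() releases the copy,
 * because no entry carries substructure.
 *
 *		List	   *myCopy = copyObject(fkeys);
 *		...
 *		list_free_deep(myCopy);
 */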
/*
* StdRdOptions
* Standard contents of rd_options for heaps and generic indexes.
* snapshot.h
* POSTGRES snapshot definition
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/utils/snapshot.h
*
* See also lsyscache.h, which provides convenience routines for
* common cache-lookup operations.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
*
* src/include/utils/syscache.h
*
extern Datum mul_d_interval(PG_FUNCTION_ARGS);
extern Datum interval_div(PG_FUNCTION_ARGS);
extern Datum interval_accum(PG_FUNCTION_ARGS);
+#ifdef PGXC
+extern Datum interval_collect(PG_FUNCTION_ARGS);
+#endif
+ extern Datum interval_combine(PG_FUNCTION_ARGS);
extern Datum interval_accum_inv(PG_FUNCTION_ARGS);
extern Datum interval_avg(PG_FUNCTION_ARGS);
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/utils/tuplesort.h
* Also, we have changed the API to return tuples in TupleTableSlots,
* so that there is a check to prevent attempted access to system columns.
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+ * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/utils/tuplestore.h
#include "utils/rel.h"
#include "utils/snapmgr.h"
#include "utils/typcache.h"
+#ifdef XCP
+#include "pgxc/pgxc.h"
+#endif
- static const char *const raise_skip_msg = "RAISE";
-
typedef struct
{
int nargs; /* number of arguments */
syntax_errcontext.previous = error_context_stack;
error_context_stack = &syntax_errcontext;
- oldCxt = MemoryContextSwitchTo(compile_tmp_cxt);
+ oldCxt = MemoryContextSwitchTo(plpgsql_compile_tmp_cxt);
- (void) raw_parser(stmt);
+ (void) raw_parser(stmt, NULL);
MemoryContextSwitchTo(oldCxt);
/* Restore former ereport callback */
9999
(1 row)
- explain (costs off, nodes off)
+ -- the planner may choose a generic aggregate here if parallel query is
+ -- enabled, since that plan will be parallel safe and the "optimized"
+ -- plan, which has almost identical cost, will not be. we want to test
+ -- the optimized plan, so temporarily disable parallel query.
+ begin;
+ set local max_parallel_workers_per_gather = 0;
+ explain (costs off)
select max(unique1) from tenk1 where unique1 > 42000;
- QUERY PLAN
----------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
(1 row)
+ rollback;
-- multi-column index (uses tenk1_thous_tenthous)
-explain (costs off)
+explain (costs off, nodes off)
select max(tenthous) from tenk1 where thousand = 33;
- QUERY PLAN
-----------------------------------------------------------------------------
+ QUERY PLAN
+----------------------------------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
insert into minmaxtest1 values(13), (14);
insert into minmaxtest2 values(15), (16);
insert into minmaxtest3 values(17), (18);
-explain (costs off)
+explain (costs off, nodes off)
select min(f1), max(f1) from minmaxtest;
- QUERY PLAN
- -------------------------------------------------
- Aggregate
- -> Remote Subquery Scan on all
- -> Aggregate
- -> Append
- -> Seq Scan on minmaxtest
- -> Seq Scan on minmaxtest1
- -> Seq Scan on minmaxtest2
- -> Seq Scan on minmaxtest3
- (8 rows)
+ QUERY PLAN
+ ----------------------------------------------------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Merge Append
+ Sort Key: minmaxtest.f1
+ -> Index Only Scan using minmaxtesti on minmaxtest
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan using minmaxtest1i on minmaxtest1
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan Backward using minmaxtest2i on minmaxtest2
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan using minmaxtest3i on minmaxtest3
+ InitPlan 2 (returns $1)
+ -> Limit
+ -> Merge Append
+ Sort Key: minmaxtest_1.f1 DESC
+ -> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_1
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest1_1
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest2_1
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest3_1
+ (23 rows)
select min(f1), max(f1) from minmaxtest;
min | max
-4567890123456789
(1 row)
+-- int8 aggregates for distributed tables
+CREATE TABLE int8_tbl_aggtest AS SELECT * FROM int8_tbl;
+SELECT avg(q1) FROM int8_tbl_aggtest;
+ avg
+-----------------------
+ 2740734074074122.6000
+(1 row)
+
+SELECT sum(q1) FROM int8_tbl_aggtest;
+ sum
+-------------------
+ 13703670370370613
+(1 row)
+
+SELECT max(q1) FROM int8_tbl_aggtest;
+ max
+------------------
+ 4567890123456789
+(1 row)
+
+SELECT min(q1) FROM int8_tbl_aggtest;
+ min
+-----
+ 123
+(1 row)
+
+SELECT stddev_pop(q1) FROM int8_tbl_aggtest;
+ stddev_pop
+------------------
+ 2237800000713538
+(1 row)
+
+SELECT stddev_samp(q1) FROM int8_tbl_aggtest;
+ stddev_samp
+------------------
+ 2501936460822274
+(1 row)
+
+SELECT var_pop(q1) FROM int8_tbl_aggtest;
+ var_pop
+---------------------------------
+ 5007748843193509284246811160533
+(1 row)
+
+SELECT var_samp(q1) FROM int8_tbl_aggtest;
+ var_samp
+---------------------------------
+ 6259686053991886605308513950667
+(1 row)
+
+DROP TABLE int8_tbl_aggtest;
++
+ -- test that aggregates with common transition functions share the same state
+ begin work;
+ create type avg_state as (total bigint, count bigint);
+ create or replace function avg_transfn(state avg_state, n int) returns avg_state as
+ $$
+ declare new_state avg_state;
+ begin
+ raise notice 'avg_transfn called with %', n;
+ if state is null then
+ if n is not null then
+ new_state.total := n;
+ new_state.count := 1;
+ return new_state;
+ end if;
+ return null;
+ elsif n is not null then
+ state.total := state.total + n;
+ state.count := state.count + 1;
+ return state;
+ end if;
+
+ return null;
+ end
+ $$ language plpgsql;
+ create function avg_finalfn(state avg_state) returns int4 as
+ $$
+ begin
+ if state is null then
+ return NULL;
+ else
+ return state.total / state.count;
+ end if;
+ end
+ $$ language plpgsql;
+ create function sum_finalfn(state avg_state) returns int4 as
+ $$
+ begin
+ if state is null then
+ return NULL;
+ else
+ return state.total;
+ end if;
+ end
+ $$ language plpgsql;
+ create aggregate my_avg(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn
+ );
+ create aggregate my_sum(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = sum_finalfn
+ );
+ -- aggregate state should be shared as aggs are the same.
+ select my_avg(one),my_avg(one) from (values(1),(3)) t(one);
+ NOTICE: avg_transfn called with 1
+ NOTICE: avg_transfn called with 3
+ my_avg | my_avg
+ --------+--------
+ 2 | 2
+ (1 row)
+
+ -- aggregate state should be shared as transfn is the same for both aggs.
+ select my_avg(one),my_sum(one) from (values(1),(3)) t(one);
+ NOTICE: avg_transfn called with 1
+ NOTICE: avg_transfn called with 3
+ my_avg | my_sum
+ --------+--------
+ 2 | 4
+ (1 row)
+
+ -- shouldn't share states due to the distinctness not matching.
+ select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one);
+ NOTICE: avg_transfn called with 1
+ NOTICE: avg_transfn called with 3
+ NOTICE: avg_transfn called with 1
+ NOTICE: avg_transfn called with 3
+ my_avg | my_sum
+ --------+--------
+ 2 | 4
+ (1 row)
+
+ -- shouldn't share states due to the filter clause not matching.
+ select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one);
+ NOTICE: avg_transfn called with 1
+ NOTICE: avg_transfn called with 3
+ NOTICE: avg_transfn called with 3
+ my_avg | my_sum
+ --------+--------
+ 3 | 4
+ (1 row)
+
+ -- this should not share the state due to different input columns.
+ select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two);
+ NOTICE: avg_transfn called with 2
+ NOTICE: avg_transfn called with 1
+ NOTICE: avg_transfn called with 4
+ NOTICE: avg_transfn called with 3
+ my_avg | my_sum
+ --------+--------
+ 2 | 6
+ (1 row)
+
+ -- test that aggs with the same sfunc and initcond share the same agg state
+ create aggregate my_sum_init(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = sum_finalfn,
+ initcond = '(10,0)'
+ );
+ create aggregate my_avg_init(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn,
+ initcond = '(10,0)'
+ );
+ create aggregate my_avg_init2(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn,
+ initcond = '(4,0)'
+ );
+ -- state should be shared if INITCONDs are matching
+ select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one);
+ NOTICE: avg_transfn called with 1
+ NOTICE: avg_transfn called with 3
+ my_sum_init | my_avg_init
+ -------------+-------------
+ 14 | 7
+ (1 row)
+
+ -- Varying INITCONDs should cause the states not to be shared.
+ select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one);
+ NOTICE: avg_transfn called with 1
+ NOTICE: avg_transfn called with 1
+ NOTICE: avg_transfn called with 3
+ NOTICE: avg_transfn called with 3
+ my_sum_init | my_avg_init2
+ -------------+--------------
+ 14 | 4
+ (1 row)
+
+ rollback;
+ -- test aggregate state sharing to ensure it works if one aggregate has a
+ -- finalfn and the other one has none.
+ begin work;
+ create or replace function sum_transfn(state int4, n int4) returns int4 as
+ $$
+ declare new_state int4;
+ begin
+ raise notice 'sum_transfn called with %', n;
+ if state is null then
+ if n is not null then
+ new_state := n;
+ return new_state;
+ end if;
+ return null;
+ elsif n is not null then
+ state := state + n;
+ return state;
+ end if;
+
+ return null;
+ end
+ $$ language plpgsql;
+ create function halfsum_finalfn(state int4) returns int4 as
+ $$
+ begin
+ if state is null then
+ return NULL;
+ else
+ return state / 2;
+ end if;
+ end
+ $$ language plpgsql;
+ create aggregate my_sum(int4)
+ (
+ stype = int4,
+ sfunc = sum_transfn
+ );
+ create aggregate my_half_sum(int4)
+ (
+ stype = int4,
+ sfunc = sum_transfn,
+ finalfunc = halfsum_finalfn
+ );
+ -- Agg state should be shared even though my_sum has no finalfn
+ select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one);
+ NOTICE: sum_transfn called with 1
+ NOTICE: sum_transfn called with 2
+ NOTICE: sum_transfn called with 3
+ NOTICE: sum_transfn called with 4
+ my_sum | my_half_sum
+ --------+-------------
+ 10 | 5
+ (1 row)
+
+ rollback;
-- after validation, the constraint should be used
alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check;
-explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
- QUERY PLAN
----------------------------------------------------------------------------
- Append
- -> Seq Scan on nv_parent
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
- -> Seq Scan on nv_child_2010
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
- -> Seq Scan on nv_child_2009
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
-(7 rows)
+explain (costs off, nodes off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
+ QUERY PLAN
+---------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Append
+ -> Seq Scan on nv_parent
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+ -> Seq Scan on nv_child_2010
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+ -> Seq Scan on nv_child_2009
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+(8 rows)
+ -- add an inherited NOT VALID constraint
+ alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid;
+ \d nv_child_2009
+ Table "public.nv_child_2009"
+ Column | Type | Modifiers
+ --------+------+-----------
+ d | date |
+ Check constraints:
+ "nv_child_2009_d_check" CHECK (d >= '01-01-2009'::date AND d <= '12-31-2009'::date)
+ "nv_parent_d_check" CHECK (d >= '01-01-2001'::date AND d <= '12-31-2099'::date) NOT VALID
+ Inherits: nv_parent
+
+ -- we leave nv_parent and children around to help test pg_dump logic
-- Foreign key adding test with mixed types
-- Note: these tables are TEMP to avoid name conflicts when this test
-- is run in parallel with foreign_key.sql.
ERROR: column "f1" does not exist
LINE 1: select f1 from c1;
^
- HINT: Perhaps you meant to reference the column "c1"."f2".
+ HINT: Perhaps you meant to reference the column "c1.f2".
drop table p1 cascade;
NOTICE: drop cascades to table c1
-create table p1 (f1 int, f2 int);
-create table c1 () inherits(p1);
+create table p1 (f1 int, f2 int) distribute by roundrobin;
+create table c1 () inherits(p1) distribute by roundrobin;
-- should be rejected since c1.f1 is inherited
alter table c1 drop column f1;
ERROR: cannot drop inherited column "f1"
ERROR: column "f1" does not exist
LINE 1: select f1 from c1;
^
- HINT: Perhaps you meant to reference the column "c1"."f2".
+ HINT: Perhaps you meant to reference the column "c1.f2".
drop table p1 cascade;
NOTICE: drop cascades to table c1
-create table p1 (f1 int, f2 int);
-create table c1 () inherits(p1);
+create table p1 (f1 int, f2 int) distribute by roundrobin;
+create table c1 () inherits(p1) distribute by roundrobin;
-- should be rejected since c1.f1 is inherited
alter table c1 drop column f1;
ERROR: cannot drop inherited column "f1"
commit;
begin;
alter table alterlock2 validate constraint alterlock2nv;
+ERROR: constraint "alterlock2nv" of relation "alterlock2" does not exist
select * from my_locks order by 1;
- relname | max_lockmode
------------------+--------------------------
- alterlock | RowShareLock
- alterlock2 | ShareUpdateExclusiveLock
- alterlock2_pkey | AccessShareLock
- alterlock_pkey | AccessShareLock
-(4 rows)
-
+ERROR: current transaction is aborted, commands ignored until end of transaction block
rollback;
+ create or replace view my_locks as
+ select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode
+ from pg_locks l join pg_class c on l.relation = c.oid
+ where virtualtransaction = (
+ select virtualtransaction
+ from pg_locks
+ where transactionid = txid_current()::integer)
+ and locktype = 'relation'
+ and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog')
+ and c.relname = 'my_locks'
+ group by c.relname;
+ -- raise exception
+ alter table my_locks set (autovacuum_enabled = false);
+ ERROR: unrecognized parameter "autovacuum_enabled"
+ alter view my_locks set (autovacuum_enabled = false);
+ ERROR: unrecognized parameter "autovacuum_enabled"
+ alter table my_locks reset (autovacuum_enabled);
+ alter view my_locks reset (autovacuum_enabled);
+ begin;
+ alter view my_locks set (security_barrier=off);
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ ----------+---------------------
+ my_locks | AccessExclusiveLock
+ (1 row)
+
+ alter view my_locks reset (security_barrier);
+ rollback;
+ -- this test intentionally applies the ALTER TABLE command against a view, but
+ -- uses a view option so we expect this to succeed. This form of SQL is
+ -- accepted for historical reasons, as shown in the docs for ALTER VIEW
+ begin;
+ alter table my_locks set (security_barrier=off);
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ ----------+---------------------
+ my_locks | AccessExclusiveLock
+ (1 row)
+
+ alter table my_locks reset (security_barrier);
+ rollback;
-- cleanup
drop table alterlock2;
drop table alterlock;
(5 rows)
CREATE TABLE logged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged1); -- foreign key
+ERROR: Hash/Modulo distribution column does not refer to hash/modulo distribution column in referenced table.
CREATE TABLE logged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged3); -- self-referencing foreign key
ALTER TABLE logged1 SET UNLOGGED; -- fails because a foreign key from a permanent table exists
+ ERROR: could not change table "logged1" to unlogged because it references logged table "logged2"
ALTER TABLE logged3 SET UNLOGGED; -- skip self-referencing foreign key
ALTER TABLE logged2 SET UNLOGGED;
+ERROR: relation "logged2" does not exist
ALTER TABLE logged1 SET UNLOGGED;
-- check relpersistence of a permanent table after changing to unlogged
SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1'
ALTER TABLE logged1 SET UNLOGGED; -- silently do nothing
DROP TABLE logged3;
DROP TABLE logged2;
+ERROR: table "logged2" does not exist
DROP TABLE logged1;
+ -- test ADD COLUMN IF NOT EXISTS
+ CREATE TABLE test_add_column(c1 integer);
+ \d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Modifiers
+ --------+---------+-----------
+ c1 | integer |
+
+ ALTER TABLE test_add_column
+ ADD COLUMN c2 integer;
+ \d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Modifiers
+ --------+---------+-----------
+ c1 | integer |
+ c2 | integer |
+
+ ALTER TABLE test_add_column
+ ADD COLUMN c2 integer; -- fail because c2 already exists
+ ERROR: column "c2" of relation "test_add_column" already exists
+ \d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Modifiers
+ --------+---------+-----------
+ c1 | integer |
+ c2 | integer |
+
+ ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
+ NOTICE: column "c2" of relation "test_add_column" already exists, skipping
+ \d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Modifiers
+ --------+---------+-----------
+ c1 | integer |
+ c2 | integer |
+
+ ALTER TABLE test_add_column
+ ADD COLUMN c2 integer, -- fail because c2 already exists
+ ADD COLUMN c3 integer;
+ ERROR: column "c2" of relation "test_add_column" already exists
+ \d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Modifiers
+ --------+---------+-----------
+ c1 | integer |
+ c2 | integer |
+
+ ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+ ADD COLUMN c3 integer; -- succeeds, c3 does not exist yet
+ NOTICE: column "c2" of relation "test_add_column" already exists, skipping
+ \d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Modifiers
+ --------+---------+-----------
+ c1 | integer |
+ c2 | integer |
+ c3 | integer |
+
+ ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+ ADD COLUMN IF NOT EXISTS c3 integer; -- skipping because c3 already exists
+ NOTICE: column "c2" of relation "test_add_column" already exists, skipping
+ NOTICE: column "c3" of relation "test_add_column" already exists, skipping
+ \d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Modifiers
+ --------+---------+-----------
+ c1 | integer |
+ c2 | integer |
+ c3 | integer |
+
+ ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+ ADD COLUMN IF NOT EXISTS c3 integer, -- skipping because c3 already exists
+ ADD COLUMN c4 integer;
+ NOTICE: column "c2" of relation "test_add_column" already exists, skipping
+ NOTICE: column "c3" of relation "test_add_column" already exists, skipping
+ \d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Modifiers
+ --------+---------+-----------
+ c1 | integer |
+ c2 | integer |
+ c3 | integer |
+ c4 | integer |
+
+ DROP TABLE test_add_column;
{16,25,23} | {} | {foobar,new_word} | {{elt2}}
(3 rows)
+ SELECT b[1:1][2][2],
+ d[1:1][2]
+ FROM arrtest;
+ b | d
+ -----------------------+---------------
+ {{{113,142},{1,147}}} | {}
+ {} | {}
+ {} | {{elt1,elt2}}
+ (3 rows)
+
INSERT INTO arrtest(a) VALUES('{1,null,3}');
-SELECT a FROM arrtest;
+SELECT a FROM arrtest ORDER BY 1;
a
---------------
- {16,25,3,4,5}
{}
- {16,25,23}
{1,NULL,3}
+ {16,25,3,4,5}
+ {16,25,23}
(4 rows)
UPDATE arrtest SET a[4] = NULL WHERE a[2] IS NULL;
ELSE (3 * j) END
FROM CASE2_TBL b
WHERE j = -CASE_TBL.i;
-SELECT * FROM CASE_TBL;
+SELECT * FROM CASE_TBL ORDER BY i, f;
i | f
-----+-------
- 8 | 20.2
- -9 | -30.3
-12 |
+ -9 | -30.3
-8 | 10.1
+ 8 | 20.2
(4 rows)
+ --
+ -- Nested CASE expressions
+ --
+ -- This test exercises a bug caused by aliasing econtext->caseValue_isNull
+ -- with the isNull argument of the inner CASE's ExecEvalCase() call. After
+ -- evaluating the vol(null) expression in the inner CASE's second WHEN-clause,
+ -- the isNull flag for the case test value incorrectly became true, causing
+ -- the third WHEN-clause not to match. The volatile function calls are needed
+ -- to prevent constant-folding in the planner, which would hide the bug.
+ CREATE FUNCTION vol(text) returns text as
+ 'begin return $1; end' language plpgsql volatile;
+ SELECT CASE
+ (CASE vol('bar')
+ WHEN 'foo' THEN 'it was foo!'
+ WHEN vol(null) THEN 'null input'
+ WHEN 'bar' THEN 'it was bar!' END
+ )
+ WHEN 'it was foo!' THEN 'foo recognized'
+ WHEN 'it was bar!' THEN 'bar recognized'
+ ELSE 'unrecognized' END;
+ case
+ ----------------
+ bar recognized
+ (1 row)
+
+ -- In this case, we can't inline the SQL function without confusing things.
+ CREATE DOMAIN foodomain AS text;
+ CREATE FUNCTION volfoo(text) returns foodomain as
+ 'begin return $1::foodomain; end' language plpgsql volatile;
+ CREATE FUNCTION inline_eq(foodomain, foodomain) returns boolean as
+ 'SELECT CASE $2::text WHEN $1::text THEN true ELSE false END' language sql;
+ CREATE OPERATOR = (procedure = inline_eq,
+ leftarg = foodomain, rightarg = foodomain);
+ SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' END;
+ case
+ ------------
+ is not foo
+ (1 row)
+
--
-- Clean up
--
DROP VIEW ctlv1;
DROP TABLE IF EXISTS ctlt4, ctlt10, ctlt11, ctlt11a, ctlt12;
NOTICE: table "ctlt10" does not exist, skipping
+NOTICE: table "ctlt11" does not exist, skipping
+NOTICE: table "ctlt11a" does not exist, skipping
+ /* LIKE WITH OIDS */
+ CREATE TABLE has_oid (x INTEGER) WITH OIDS;
+ CREATE TABLE no_oid (y INTEGER);
+ CREATE TABLE like_test (z INTEGER, LIKE has_oid);
+ SELECT oid FROM like_test;
+ oid
+ -----
+ (0 rows)
+
+ CREATE TABLE like_test2 (z INTEGER, LIKE no_oid);
+ SELECT oid FROM like_test2; -- fail
+ ERROR: column "oid" does not exist
+ LINE 1: SELECT oid FROM like_test2;
+ ^
+ CREATE TABLE like_test3 (z INTEGER, LIKE has_oid, LIKE no_oid);
+ SELECT oid FROM like_test3;
+ oid
+ -----
+ (0 rows)
+
+ CREATE TABLE like_test4 (z INTEGER, PRIMARY KEY(oid), LIKE has_oid);
+ SELECT oid FROM like_test4;
+ oid
+ -----
+ (0 rows)
+
+ DROP TABLE has_oid, no_oid, like_test, like_test2, like_test3, like_test4;
-- OK
create event trigger regress_event_trigger_end on ddl_command_end
execute procedure test_event_trigger();
+ERROR: EVENT TRIGGER not yet supported in Postgres-XL
-- should fail, food is not a valid filter variable
create event trigger regress_event_trigger2 on ddl_command_start
- when food in ('sandwhich')
+ when food in ('sandwich')
execute procedure test_event_trigger();
-ERROR: unrecognized filter variable "food"
--- should fail, sandwich is not a valid command tag
+ERROR: EVENT TRIGGER not yet supported in Postgres-XL
+-- should fail, sandwich is not a valid command tag
create event trigger regress_event_trigger2 on ddl_command_start
- when tag in ('sandwhich')
+ when tag in ('sandwich')
execute procedure test_event_trigger();
-ERROR: filter value "sandwich" not recognized for filter variable "tag"
+ERROR: EVENT TRIGGER not yet supported in Postgres-XL
-- should fail, create skunkcabbage is not a valid command tag
create event trigger regress_event_trigger2 on ddl_command_start
when tag in ('create table', 'create skunkcabbage')
comment on event trigger wrong.regress_event_trigger is 'test comment';
ERROR: event trigger name cannot be qualified
-- drop as non-superuser should fail
- create role regression_bob;
- set role regression_bob;
+ create role regress_evt_user;
+ set role regress_evt_user;
create event trigger regress_event_trigger_noperms on ddl_command_start
execute procedure test_event_trigger();
-ERROR: permission denied to create event trigger "regress_event_trigger_noperms"
-HINT: Must be superuser to create an event trigger.
+ERROR: EVENT TRIGGER not yet supported in Postgres-XL
reset role;
-- all OK
alter event trigger regress_event_trigger enable replica;
drop event trigger if exists regress_event_trigger2;
NOTICE: event trigger "regress_event_trigger2" does not exist, skipping
drop event trigger regress_event_trigger3;
+ERROR: event trigger "regress_event_trigger3" does not exist
drop event trigger regress_event_trigger_end;
+ERROR: event trigger "regress_event_trigger_end" does not exist
-- test support for dropped objects
- CREATE SCHEMA schema_one authorization regression_bob;
- CREATE SCHEMA schema_two authorization regression_bob;
- CREATE SCHEMA audit_tbls authorization regression_bob;
+ CREATE SCHEMA schema_one authorization regress_evt_user;
+ CREATE SCHEMA schema_two authorization regress_evt_user;
+ CREATE SCHEMA audit_tbls authorization regress_evt_user;
CREATE TEMP TABLE a_temp_tbl ();
- SET SESSION AUTHORIZATION regression_bob;
+ SET SESSION AUTHORIZATION regress_evt_user;
CREATE TABLE schema_one.table_one(a int);
CREATE TABLE schema_one."table two"(a int);
CREATE TABLE schema_one.table_three(a int);
drop cascades to table schema_one.table_one
drop cascades to table schema_one."table two"
drop cascades to table schema_one.table_three
+ NOTICE: table "schema_two_table_two" does not exist, skipping
+ NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping
+ ERROR: object audit_tbls.schema_two_table_three of type table cannot be dropped
+ CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE
+ SQL statement "DROP TABLE IF EXISTS audit_tbls.schema_two_table_three"
+ PL/pgSQL function test_evtrig_dropped_objects() line 8 at EXECUTE
DELETE FROM undroppable_objs WHERE object_identity = 'audit_tbls.schema_two_table_three';
DROP SCHEMA schema_one, schema_two CASCADE;
- ERROR: schema "schema_one" does not exist
+ NOTICE: drop cascades to 7 other objects
+ DETAIL: drop cascades to table schema_two.table_two
+ drop cascades to table schema_two.table_three
+ drop cascades to function schema_two.add(integer,integer)
+ drop cascades to function schema_two.newton(integer)
+ drop cascades to table schema_one.table_one
+ drop cascades to table schema_one."table two"
+ drop cascades to table schema_one.table_three
+ NOTICE: table "schema_two_table_two" does not exist, skipping
+ NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping
+ NOTICE: table "schema_one_table_one" does not exist, skipping
+ NOTICE: table "schema_one_table two" does not exist, skipping
+ NOTICE: table "schema_one_table_three" does not exist, skipping
+ ERROR: object schema_one.table_three of type table cannot be dropped
+ CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE
DELETE FROM undroppable_objs WHERE object_identity = 'schema_one.table_three';
DROP SCHEMA schema_one, schema_two CASCADE;
- ERROR: schema "schema_one" does not exist
+ NOTICE: drop cascades to 7 other objects
+ DETAIL: drop cascades to table schema_two.table_two
+ drop cascades to table schema_two.table_three
+ drop cascades to function schema_two.add(integer,integer)
+ drop cascades to function schema_two.newton(integer)
+ drop cascades to table schema_one.table_one
+ drop cascades to table schema_one."table two"
+ drop cascades to table schema_one.table_three
+ NOTICE: table "schema_two_table_two" does not exist, skipping
+ NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping
+ NOTICE: table "schema_one_table_one" does not exist, skipping
+ NOTICE: table "schema_one_table two" does not exist, skipping
+ NOTICE: table "schema_one_table_three" does not exist, skipping
SELECT * FROM dropped_objects WHERE schema IS NULL OR schema <> 'pg_toast';
- type | schema | object
---------------+------------+-------------------------------------
- table column | schema_one | schema_one.table_one.a
- schema | | schema_two
- table | schema_two | schema_two.table_two
- type | schema_two | schema_two.table_two
- type | schema_two | schema_two.table_two[]
- table | audit_tbls | audit_tbls.schema_two_table_three
- type | audit_tbls | audit_tbls.schema_two_table_three
- type | audit_tbls | audit_tbls.schema_two_table_three[]
- table | schema_two | schema_two.table_three
- type | schema_two | schema_two.table_three
- type | schema_two | schema_two.table_three[]
- function | schema_two | schema_two.add(integer,integer)
- aggregate | schema_two | schema_two.newton(integer)
- schema | | schema_one
- table | schema_one | schema_one.table_one
- type | schema_one | schema_one.table_one
- type | schema_one | schema_one.table_one[]
- table | schema_one | schema_one."table two"
- type | schema_one | schema_one."table two"
- type | schema_one | schema_one."table two"[]
- table | schema_one | schema_one.table_three
- type | schema_one | schema_one.table_three
- type | schema_one | schema_one.table_three[]
-(23 rows)
+ type | schema | object
+------+--------+--------
+(0 rows)
- DROP OWNED BY regression_bob;
+ DROP OWNED BY regress_evt_user;
+ NOTICE: schema "audit_tbls" does not exist, skipping
SELECT * FROM dropped_objects WHERE type = 'schema';
- type | schema | object
---------+--------+------------
- schema | | schema_two
- schema | | schema_one
- schema | | audit_tbls
-(3 rows)
+ type | schema | object
+------+--------+--------
+(0 rows)
- DROP ROLE regression_bob;
+ DROP ROLE regress_evt_user;
DROP EVENT TRIGGER regress_event_trigger_drop_objects;
+ERROR: event trigger "regress_event_trigger_drop_objects" does not exist
DROP EVENT TRIGGER undroppable;
+ERROR: event trigger "undroppable" does not exist
CREATE OR REPLACE FUNCTION event_trigger_report_dropped()
RETURNS event_trigger
LANGUAGE plpgsql
INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30');
INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200');
INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
-SELECT '' AS five, * FROM FLOAT8_TBL;
+SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1;
five | f1
------+-----------------------
- | 0
- | -34.84
- | -1004.3
| -1.2345678901234e+200
+ | -1004.3
+ | -34.84
| -1.2345678901234e-200
+ | 0
(5 rows)
+ -- test exact cases for trigonometric functions in degrees
+ SET extra_float_digits = 3;
+ SELECT x,
+ sind(x),
+ sind(x) IN (-1,-0.5,0,0.5,1) AS sind_exact
+ FROM (VALUES (0), (30), (90), (150), (180),
+ (210), (270), (330), (360)) AS t(x);
+ x | sind | sind_exact
+ -----+------+------------
+ 0 | 0 | t
+ 30 | 0.5 | t
+ 90 | 1 | t
+ 150 | 0.5 | t
+ 180 | 0 | t
+ 210 | -0.5 | t
+ 270 | -1 | t
+ 330 | -0.5 | t
+ 360 | 0 | t
+ (9 rows)
+
+ SELECT x,
+ cosd(x),
+ cosd(x) IN (-1,-0.5,0,0.5,1) AS cosd_exact
+ FROM (VALUES (0), (60), (90), (120), (180),
+ (240), (270), (300), (360)) AS t(x);
+ x | cosd | cosd_exact
+ -----+------+------------
+ 0 | 1 | t
+ 60 | 0.5 | t
+ 90 | 0 | t
+ 120 | -0.5 | t
+ 180 | -1 | t
+ 240 | -0.5 | t
+ 270 | 0 | t
+ 300 | 0.5 | t
+ 360 | 1 | t
+ (9 rows)
+
+ SELECT x,
+ tand(x),
+ tand(x) IN ('-Infinity'::float8,-1,0,
+ 1,'Infinity'::float8) AS tand_exact,
+ cotd(x),
+ cotd(x) IN ('-Infinity'::float8,-1,0,
+ 1,'Infinity'::float8) AS cotd_exact
+ FROM (VALUES (0), (45), (90), (135), (180),
+ (225), (270), (315), (360)) AS t(x);
+ x | tand | tand_exact | cotd | cotd_exact
+ -----+-----------+------------+-----------+------------
+ 0 | 0 | t | Infinity | t
+ 45 | 1 | t | 1 | t
+ 90 | Infinity | t | 0 | t
+ 135 | -1 | t | -1 | t
+ 180 | 0 | t | -Infinity | t
+ 225 | 1 | t | 1 | t
+ 270 | -Infinity | t | 0 | t
+ 315 | -1 | t | -1 | t
+ 360 | 0 | t | Infinity | t
+ (9 rows)
+
+ SELECT x,
+ asind(x),
+ asind(x) IN (-90,-30,0,30,90) AS asind_exact,
+ acosd(x),
+ acosd(x) IN (0,60,90,120,180) AS acosd_exact
+ FROM (VALUES (-1), (-0.5), (0), (0.5), (1)) AS t(x);
+ x | asind | asind_exact | acosd | acosd_exact
+ ------+-------+-------------+-------+-------------
+ -1 | -90 | t | 180 | t
+ -0.5 | -30 | t | 120 | t
+ 0 | 0 | t | 90 | t
+ 0.5 | 30 | t | 60 | t
+ 1 | 90 | t | 0 | t
+ (5 rows)
+
+ SELECT x,
+ atand(x),
+ atand(x) IN (-90,-45,0,45,90) AS atand_exact
+ FROM (VALUES ('-Infinity'::float8), (-1), (0), (1),
+ ('Infinity'::float8)) AS t(x);
+ x | atand | atand_exact
+ -----------+-------+-------------
+ -Infinity | -90 | t
+ -1 | -45 | t
+ 0 | 0 | t
+ 1 | 45 | t
+ Infinity | 90 | t
+ (5 rows)
+
+ SELECT x, y,
+ atan2d(y, x),
+ atan2d(y, x) IN (-90,0,90,180) AS atan2d_exact
+ FROM (SELECT 10*cosd(a), 10*sind(a)
+ FROM generate_series(0, 360, 90) AS t(a)) AS t(x,y);
+ x | y | atan2d | atan2d_exact
+ -----+-----+--------+--------------
+ 10 | 0 | 0 | t
+ 0 | 10 | 90 | t
+ -10 | 0 | 180 | t
+ 0 | -10 | -90 | t
+ 10 | 0 | 0 | t
+ (5 rows)
+
+ RESET extra_float_digits;
CREATE ROLE regress_test_role2;
CREATE ROLE regress_test_role_super SUPERUSER;
CREATE ROLE regress_test_indirect;
- CREATE ROLE unprivileged_role;
+ CREATE ROLE regress_unprivileged_role;
CREATE FOREIGN DATA WRAPPER dummy;
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless';
+ERROR: foreign-data wrapper "dummy" does not exist
CREATE FOREIGN DATA WRAPPER postgresql VALIDATOR postgresql_fdw_validator;
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
-- At this point we should have 2 built-in wrappers and no servers.
SELECT fdwname, fdwhandler::regproc, fdwvalidator::regproc, fdwoptions FROM pg_foreign_data_wrapper ORDER BY 1, 2, 3;
- fdwname | fdwhandler | fdwvalidator | fdwoptions
-------------+------------+--------------------------+------------
- dummy | - | - |
- postgresql | - | postgresql_fdw_validator |
-(2 rows)
+ fdwname | fdwhandler | fdwvalidator | fdwoptions
+---------+------------+--------------+------------
+(0 rows)
SELECT srvname, srvoptions FROM pg_foreign_server;
srvname | srvoptions
-- CREATE FOREIGN DATA WRAPPER
CREATE FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR
-ERROR: function bar(text[], oid) does not exist
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
CREATE FOREIGN DATA WRAPPER foo;
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
\dew
- List of foreign-data wrappers
- Name | Owner | Handler | Validator
- ------+-------+---------+-----------
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator
+ ------------+---------------------------+---------+--------------------------
- dummy | regress_foreign_data_user | - | -
- foo | regress_foreign_data_user | - | -
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator
-(3 rows)
+(0 rows)
CREATE FOREIGN DATA WRAPPER foo; -- duplicate
-ERROR: foreign-data wrapper "foo" already exists
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
DROP FOREIGN DATA WRAPPER foo;
+ERROR: foreign-data wrapper "foo" does not exist
CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1');
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
+ ------------+---------------------------+---------+--------------------------+-------------------+---------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (testing '1') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
+(0 rows)
DROP FOREIGN DATA WRAPPER foo;
+ERROR: foreign-data wrapper "foo" does not exist
CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', testing '2'); -- ERROR
-ERROR: option "testing" provided more than once
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', another '2');
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
+ ------------+---------------------------+---------+--------------------------+-------------------+----------------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (testing '1', another '2') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
+(0 rows)
DROP FOREIGN DATA WRAPPER foo;
+ERROR: foreign-data wrapper "foo" does not exist
SET ROLE regress_test_role;
CREATE FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: permission denied to create foreign-data wrapper "foo"
-HINT: Must be superuser to create a foreign-data wrapper.
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
RESET ROLE;
CREATE FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator;
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
+ ------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | postgresql_fdw_validator | | |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
+(0 rows)
-- ALTER FOREIGN DATA WRAPPER
ALTER FOREIGN DATA WRAPPER foo; -- ERROR
LINE 1: ALTER FOREIGN DATA WRAPPER foo;
^
ALTER FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR
-ERROR: function bar(text[], oid) does not exist
+ERROR: foreign-data wrapper "foo" does not exist
ALTER FOREIGN DATA WRAPPER foo NO VALIDATOR;
+ERROR: foreign-data wrapper "foo" does not exist
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
+ ------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
+(0 rows)
ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '1', b '2');
+ERROR: foreign-data wrapper "foo" does not exist
ALTER FOREIGN DATA WRAPPER foo OPTIONS (SET c '4'); -- ERROR
-ERROR: option "c" not found
+ERROR: foreign-data wrapper "foo" does not exist
ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP c); -- ERROR
-ERROR: option "c" not found
+ERROR: foreign-data wrapper "foo" does not exist
ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD x '1', DROP x);
+ERROR: foreign-data wrapper "foo" does not exist
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
+ ------------+---------------------------+---------+--------------------------+-------------------+----------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (a '1', b '2') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
+(0 rows)
ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP a, SET b '3', ADD c '4');
+ERROR: foreign-data wrapper "foo" does not exist
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
+ ------------+---------------------------+---------+--------------------------+-------------------+----------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (b '3', c '4') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
+(0 rows)
ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '2');
+ERROR: foreign-data wrapper "foo" does not exist
ALTER FOREIGN DATA WRAPPER foo OPTIONS (b '4'); -- ERROR
-ERROR: option "b" provided more than once
+ERROR: foreign-data wrapper "foo" does not exist
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
+ ------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
+(0 rows)
SET ROLE regress_test_role;
ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); -- ERROR
HINT: Must be superuser to alter a foreign-data wrapper.
SET ROLE regress_test_role_super;
ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5');
+ERROR: foreign-data wrapper "foo" does not exist
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
-------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2', d '5') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
++ List of foreign-data wrappers
++ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
+(0 rows)
ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role; -- ERROR
ERROR: permission denied to change owner of foreign-data wrapper "foo"
HINT: Must be superuser to alter a foreign-data wrapper.
RESET ROLE;
\dew+
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
+ List of foreign-data wrappers
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
+ ------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo1 | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
++(0 rows)
+
+ ALTER FOREIGN DATA WRAPPER foo RENAME TO foo1;
+ \dew+
+ List of foreign-data wrappers
+ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
+ ------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
+(0 rows)
-ALTER FOREIGN DATA WRAPPER foo1 RENAME TO foo;
-- DROP FOREIGN DATA WRAPPER
DROP FOREIGN DATA WRAPPER nonexistent; -- ERROR
ERROR: foreign-data wrapper "nonexistent" does not exist
DROP FOREIGN DATA WRAPPER IF EXISTS nonexistent;
NOTICE: foreign-data wrapper "nonexistent" does not exist, skipping
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
-------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
++ List of foreign-data wrappers
++ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
+(0 rows)
DROP ROLE regress_test_role_super; -- ERROR
-ERROR: role "regress_test_role_super" cannot be dropped because some objects depend on it
-DETAIL: owner of foreign-data wrapper foo
SET ROLE regress_test_role_super;
-DROP FOREIGN DATA WRAPPER foo;
+ERROR: role "regress_test_role_super" does not exist
+DROP FOREIGN DATA WRAPPER foo; -- ERROR
+ERROR: foreign-data wrapper "foo" does not exist
RESET ROLE;
+ALTER ROLE regress_test_role_super SUPERUSER;
+ERROR: role "regress_test_role_super" does not exist
+DROP FOREIGN DATA WRAPPER foo;
+ERROR: foreign-data wrapper "foo" does not exist
DROP ROLE regress_test_role_super;
+ERROR: role "regress_test_role_super" does not exist
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(2 rows)
++ List of foreign-data wrappers
++ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
+(0 rows)
CREATE FOREIGN DATA WRAPPER foo;
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s1 FOREIGN DATA WRAPPER foo;
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
COMMENT ON SERVER s1 IS 'foreign server';
+ERROR: server "s1" does not exist
CREATE USER MAPPING FOR current_user SERVER s1;
+ERROR: Postgres-XL does not support USER MAPPING yet
+DETAIL: The feature is not currently supported
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
++ List of foreign-data wrappers
++ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
+(0 rows)
\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
-------+---------------------------+----------------------+-------------------+------+---------+-------------+----------------
- s1 | regress_foreign_data_user | foo | | | | | foreign server
-(1 row)
+ List of foreign servers
+ Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options
+------+-------+----------------------+-------------------+------+---------+---------
+(0 rows)
\deu+
- List of user mappings
- Server | User name | FDW Options
---------+---------------------------+-------------
- s1 | regress_foreign_data_user |
-(1 row)
+ List of user mappings
+ Server | User name | Options
+--------+-----------+---------
+(0 rows)
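Postgres-XL rejects all SQL/MED DDL up front, so every wrapper, server, and mapping listing in this file stays empty. As a rough cross-check, the same emptiness can be read straight from the system catalogs (standard PostgreSQL names; the all-zero result is what the Postgres-XL output above implies):

SELECT (SELECT count(*) FROM pg_foreign_data_wrapper) AS fdws,
       (SELECT count(*) FROM pg_foreign_server)       AS servers,
       (SELECT count(*) FROM pg_user_mappings)        AS mappings;
-- expected under Postgres-XL here: 0 | 0 | 0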
DROP FOREIGN DATA WRAPPER foo; -- ERROR
ERROR: cannot drop foreign-data wrapper foo because other objects depend on it
DROP FOREIGN DATA WRAPPER foo CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to server s1
- drop cascades to user mapping for foreign_data_user on server s1
+ drop cascades to user mapping for regress_foreign_data_user on server s1
\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | Options
- ------+-------+---------+-----------+-------------------+---------
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(2 rows)
++ List of foreign-data wrappers
++ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
+(0 rows)
\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
-------+-------+----------------------+-------------------+------+---------+-------------+-------------
+ List of foreign servers
+ Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options
+------+-------+----------------------+-------------------+------+---------+---------
(0 rows)
\deu+
-- exercise CREATE SERVER
CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: foreign-data wrapper "foo" does not exist
-CREATE FOREIGN DATA WRAPPER foo OPTIONS ("test wrapper" 'true');
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
+CREATE FOREIGN DATA WRAPPER foo OPTIONS (test_wrapper 'true');
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s1 FOREIGN DATA WRAPPER foo;
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: server "s1" already exists
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s2 FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s3 TYPE 'oracle' FOREIGN DATA WRAPPER foo;
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s4 TYPE 'oracle' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s5 VERSION '15.0' FOREIGN DATA WRAPPER foo;
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s6 VERSION '16.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s7 TYPE 'oracle' VERSION '17.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (foo '1'); -- ERROR
-ERROR: invalid option "foo"
-HINT: Valid options in this context are: authtype, service, connect_timeout, dbname, host, hostaddr, port, tty, options, requiressl, sslmode, gsslib
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (host 'localhost', dbname 's8db');
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options
- ------+-------+----------------------+-------------------+------+---------+---------
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | | | | |
- s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
-(8 rows)
++ List of foreign-data wrappers
++ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
+(0 rows)
SET ROLE regress_test_role;
CREATE SERVER t1 FOREIGN DATA WRAPPER foo; -- ERROR: no usage on FDW
-ERROR: permission denied for foreign-data wrapper foo
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
RESET ROLE;
GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role;
+ERROR: foreign-data wrapper "foo" does not exist
SET ROLE regress_test_role;
CREATE SERVER t1 FOREIGN DATA WRAPPER foo;
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
RESET ROLE;
\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | | | | |
- s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
- t1 | regress_test_role | foo | | | | |
-(9 rows)
++ List of foreign-data wrappers
++ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
++(0 rows)
+ List of foreign servers
+ Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options
+------+-------+----------------------+-------------------+------+---------+---------
+(0 rows)
REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_test_role;
+ERROR: foreign-data wrapper "foo" does not exist
GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect;
+ERROR: foreign-data wrapper "foo" does not exist
SET ROLE regress_test_role;
CREATE SERVER t2 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
RESET ROLE;
GRANT regress_test_indirect TO regress_test_role;
SET ROLE regress_test_role;
CREATE SERVER t2 FOREIGN DATA WRAPPER foo;
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | | | | |
- s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
++ List of foreign-data wrappers
++ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
++(0 rows)
+ List of foreign servers
+ Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options
+------+-------+----------------------+-------------------+------+---------+---------
+(0 rows)
RESET ROLE;
REVOKE regress_test_indirect FROM regress_test_role;
ALTER SERVER s0 OPTIONS (a '1'); -- ERROR
ERROR: server "s0" does not exist
ALTER SERVER s1 VERSION '1.0' OPTIONS (servername 's1');
+ERROR: server "s1" does not exist
ALTER SERVER s2 VERSION '1.1';
-ALTER SERVER s3 OPTIONS ("tns name" 'orcl', port '1521');
+ERROR: server "s2" does not exist
+ALTER SERVER s3 OPTIONS (tnsname 'orcl', port '1521');
+ERROR: server "s3" does not exist
GRANT USAGE ON FOREIGN SERVER s1 TO regress_test_role;
+ERROR: server "s1" does not exist
GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role2 WITH GRANT OPTION;
+ERROR: server "s6" does not exist
\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
-------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 1.0 | (servername 's1') |
- | | | regress_test_role=U/regress_foreign_data_user | | | |
- s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') |
- | | | regress_test_role2=U*/regress_foreign_data_user | | | |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
++ List of foreign-data wrappers
++ Name | Owner | Handler | Validator | Access privileges | FDW Options | Description
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
++(0 rows)
+ List of foreign servers
+ Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options
+------+-------+----------------------+-------------------+------+---------+---------
+(0 rows)
SET ROLE regress_test_role;
ALTER SERVER s1 VERSION '1.1'; -- ERROR
GRANT regress_test_indirect TO regress_test_role;
SET ROLE regress_test_role;
ALTER SERVER s1 OWNER TO regress_test_indirect;
+ERROR: server "s1" does not exist
RESET ROLE;
GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect;
+ERROR: foreign-data wrapper "foo" does not exist
SET ROLE regress_test_role;
ALTER SERVER s1 OWNER TO regress_test_indirect;
+ERROR: server "s1" does not exist
RESET ROLE;
DROP ROLE regress_test_indirect; -- ERROR
-ERROR: role "regress_test_indirect" cannot be dropped because some objects depend on it
-DETAIL: owner of server s1
-privileges for foreign-data wrapper foo
\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
- ------+-----------------------+----------------------+-----------------------------------------------+--------+---------+--------------------------------------+-------------
- s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') |
- s2 | foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
- s3 | foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
- s4 | foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | foreign_data_user | foo | | | 15.0 | |
- s6 | foreign_data_user | foo | foreign_data_user=U/foreign_data_user +| | 16.0 | (host 'a', dbname 'b') |
- | | | regress_test_role2=U*/foreign_data_user | | | |
- s7 | foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
+ List of foreign servers
+ Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
+ ------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+-------------
+ s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') |
+ s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
+ s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
+ s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
+ s5 | regress_foreign_data_user | foo | | | 15.0 | |
+ s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') |
+ | | | regress_test_role2=U*/regress_foreign_data_user | | | |
+ s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
+ s8 | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') |
+ t1 | regress_test_role | foo | | | | |
+ t2 | regress_test_role | foo | | | | |
(10 rows)
ALTER SERVER s8 RENAME to s8new;
\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
- -------+-----------------------+----------------------+-----------------------------------------------+--------+---------+--------------------------------------+-------------
- s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') |
- s2 | foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
- s3 | foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
- s4 | foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | foreign_data_user | foo | | | 15.0 | |
- s6 | foreign_data_user | foo | foreign_data_user=U/foreign_data_user +| | 16.0 | (host 'a', dbname 'b') |
- | | | regress_test_role2=U*/foreign_data_user | | | |
- s7 | foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8new | foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
+ List of foreign servers
+ Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description
+ -------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+-------------
+ s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') |
+ s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
+ s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
+ s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
+ s5 | regress_foreign_data_user | foo | | | 15.0 | |
+ s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') |
+ | | | regress_test_role2=U*/regress_foreign_data_user | | | |
+ s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
+ s8new | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') |
+ t1 | regress_test_role | foo | | | | |
+ t2 | regress_test_role | foo | | | | |
(10 rows)
-ALTER SERVER s8new RENAME to s8;
-- DROP SERVER
DROP SERVER nonexistent; -- ERROR
ERROR: server "nonexistent" does not exist
DROP SERVER s3; -- ERROR
ERROR: cannot drop server s3 because other objects depend on it
- DETAIL: user mapping for foreign_data_user on server s3 depends on server s3
+ DETAIL: user mapping for regress_foreign_data_user on server s3 depends on server s3
HINT: Use DROP ... CASCADE to drop the dependent objects too.
DROP SERVER s3 CASCADE;
- NOTICE: drop cascades to user mapping for foreign_data_user on server s3
+ NOTICE: drop cascades to user mapping for regress_foreign_data_user on server s3
\des
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+---------------------------+----------------------
- s4 | regress_foreign_data_user | foo
- s5 | regress_foreign_data_user | foo
- s6 | regress_foreign_data_user | foo
- s7 | regress_foreign_data_user | foo
- s8 | regress_foreign_data_user | postgresql
- t1 | regress_test_role | foo
- t2 | regress_test_role | foo
-(7 rows)
+ List of foreign servers
+ Name | Owner | Foreign-data wrapper
+------+-------+----------------------
+(0 rows)
\deu
List of user mappings
DROP USER MAPPING IF EXISTS FOR user SERVER ss4;
NOTICE: server does not exist, skipping
DROP USER MAPPING IF EXISTS FOR public SERVER s7;
-NOTICE: user mapping "public" does not exist for the server, skipping
+NOTICE: server does not exist, skipping
CREATE USER MAPPING FOR public SERVER s8;
+ERROR: Postgres-XL does not support USER MAPPING yet
+DETAIL: The feature is not currently supported
SET ROLE regress_test_role;
DROP USER MAPPING FOR public SERVER s8; -- ERROR
-ERROR: must be owner of foreign server s8
+ERROR: server "s8" does not exist
RESET ROLE;
DROP SERVER s7;
+ERROR: server "s7" does not exist
\deu
- List of user mappings
- Server | User name
---------+---------------------------
- s4 | public
- s4 | regress_foreign_data_user
- s5 | regress_test_role
- s6 | regress_test_role
- s8 | public
- s8 | regress_foreign_data_user
- t1 | public
- t1 | regress_test_role
-(8 rows)
-
+List of user mappings
+ Server | User name
+--------+-----------
+(0 rows)
-
-- CREATE FOREIGN TABLE
CREATE SCHEMA foreign_schema;
-CREATE SERVER s0 FOREIGN DATA WRAPPER dummy;
+CREATE SERVER sc FOREIGN DATA WRAPPER dummy;
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
CREATE FOREIGN TABLE ft1 (); -- ERROR
ERROR: syntax error at or near ";"
LINE 1: CREATE FOREIGN TABLE ft1 ();
NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-- Information schema
SELECT * FROM information_schema.foreign_data_wrappers ORDER BY 1, 2;
- foreign_data_wrapper_catalog | foreign_data_wrapper_name | authorization_identifier | library_name | foreign_data_wrapper_language
-------------------------------+---------------------------+---------------------------+--------------+-------------------------------
- regression | dummy | regress_foreign_data_user | | c
- regression | foo | regress_foreign_data_user | | c
- regression | postgresql | regress_foreign_data_user | | c
-(3 rows)
+ foreign_data_wrapper_catalog | foreign_data_wrapper_name | authorization_identifier | library_name | foreign_data_wrapper_language
+------------------------------+---------------------------+--------------------------+--------------+-------------------------------
+(0 rows)
SELECT * FROM information_schema.foreign_data_wrapper_options ORDER BY 1, 2, 3;
- foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value
-------------------------------+---------------------------+--------------+--------------
- regression | foo | test wrapper | true
-(1 row)
+ foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value
+------------------------------+---------------------------+-------------+--------------
+(0 rows)
SELECT * FROM information_schema.foreign_servers ORDER BY 1, 2;
- foreign_server_catalog | foreign_server_name | foreign_data_wrapper_catalog | foreign_data_wrapper_name | foreign_server_type | foreign_server_version | authorization_identifier
-------------------------+---------------------+------------------------------+---------------------------+---------------------+------------------------+---------------------------
- regression | s0 | regression | dummy | | | regress_foreign_data_user
- regression | s4 | regression | foo | oracle | | regress_foreign_data_user
- regression | s5 | regression | foo | | 15.0 | regress_test_role
- regression | s6 | regression | foo | | 16.0 | regress_test_indirect
- regression | s8 | regression | postgresql | | | regress_foreign_data_user
- regression | t1 | regression | foo | | | regress_test_indirect
- regression | t2 | regression | foo | | | regress_test_role
-(7 rows)
-
+ foreign_server_catalog | foreign_server_name | foreign_data_wrapper_catalog | foreign_data_wrapper_name | foreign_server_type | foreign_server_version | authorization_identifier
+------------------------+---------------------+------------------------------+---------------------------+---------------------+------------------------+--------------------------
+(0 rows)
-
SELECT * FROM information_schema.foreign_server_options ORDER BY 1, 2, 3;
- foreign_server_catalog | foreign_server_name | option_name | option_value
-------------------------+---------------------+-----------------+--------------
- regression | s4 | dbname | b
- regression | s4 | host | a
- regression | s6 | dbname | b
- regression | s6 | host | a
- regression | s8 | connect_timeout | 30
- regression | s8 | dbname | db1
-(6 rows)
+ foreign_server_catalog | foreign_server_name | option_name | option_value
+------------------------+---------------------+-------------+--------------
+(0 rows)
SELECT * FROM information_schema.user_mappings ORDER BY lower(authorization_identifier), 2, 3;
- authorization_identifier | foreign_server_catalog | foreign_server_name
----------------------------+------------------------+---------------------
- PUBLIC | regression | s4
- PUBLIC | regression | s8
- PUBLIC | regression | t1
- regress_foreign_data_user | regression | s4
- regress_foreign_data_user | regression | s8
- regress_test_role | regression | s5
- regress_test_role | regression | s6
- regress_test_role | regression | t1
-(8 rows)
+ authorization_identifier | foreign_server_catalog | foreign_server_name
+--------------------------+------------------------+---------------------
+(0 rows)
SELECT * FROM information_schema.user_mapping_options ORDER BY lower(authorization_identifier), 2, 3, 4;
- authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value
----------------------------+------------------------+---------------------+--------------+--------------
- PUBLIC | regression | s4 | this mapping | is public
- PUBLIC | regression | t1 | modified | 1
- regress_foreign_data_user | regression | s8 | password | public
- regress_test_role | regression | s5 | modified | 1
- regress_test_role | regression | s6 | username | test
- regress_test_role | regression | t1 | password | boo
- regress_test_role | regression | t1 | username | bob
-(7 rows)
+ authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value
+--------------------------+------------------------+---------------------+-------------+--------------
+(0 rows)
SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
- -----------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- foreign_data_user | foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES
- foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
+ grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
+ ---------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+--------------
+ regress_foreign_data_user | regress_foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES
+ regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
+ regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
+ regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
(4 rows)
SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4;
authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value
--------------------------+------------------------+---------------------+-------------+--------------
- PUBLIC | regression | t1 | modified | 1
- regress_test_role | regression | s5 | modified | 1
- regress_test_role | regression | s6 | username | test
- regress_test_role | regression | t1 | password | boo
- regress_test_role | regression | t1 | username | bob
-(5 rows)
+(0 rows)
SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
- -----------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
+ grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
+ ---------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
+ regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
+ regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
+ regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
(3 rows)
SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
(1 row)
SELECT has_server_privilege(
- (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 's8', 'USAGE');
- has_server_privilege
-----------------------
- f
-(1 row)
-
+ (SELECT oid FROM pg_catalog.pg_roles WHERE rolname='regress_test_role'), 's8', 'USAGE');
+ERROR: server "s8" does not exist
SELECT has_server_privilege('s8', 'USAGE');
- has_server_privilege
-----------------------
- t
-(1 row)
-
+ERROR: server "s8" does not exist
GRANT USAGE ON FOREIGN SERVER s8 TO regress_test_role;
+ERROR: server "s8" does not exist
SELECT has_server_privilege('regress_test_role', 's8', 'USAGE');
- has_server_privilege
-----------------------
- t
-(1 row)
-
+ERROR: server "s8" does not exist
REVOKE USAGE ON FOREIGN SERVER s8 FROM regress_test_role;
+ERROR: server "s8" does not exist
GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role;
+ERROR: server "s4" does not exist
DROP USER MAPPING FOR public SERVER s4;
+ERROR: server "s4" does not exist
ALTER SERVER s6 OPTIONS (DROP host, DROP dbname);
+ERROR: server "s6" does not exist
ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (DROP username);
+ERROR: server "s6" does not exist
ALTER FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator;
-WARNING: changing the foreign-data wrapper validator can cause the options for dependent objects to become invalid
+ERROR: foreign-data wrapper "foo" does not exist
-- Privileges
- SET ROLE unprivileged_role;
+ SET ROLE regress_unprivileged_role;
CREATE FOREIGN DATA WRAPPER foobar; -- ERROR
-ERROR: permission denied to create foreign-data wrapper "foobar"
-HINT: Must be superuser to create a foreign-data wrapper.
+ERROR: Postgres-XL does not support FOREIGN DATA WRAPPER yet
+DETAIL: The feature is not currently supported
ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR
ERROR: permission denied to alter foreign-data wrapper "foo"
HINT: Must be superuser to alter a foreign-data wrapper.
DROP SERVER s9 CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to user mapping for public on server s9
- drop cascades to user mapping for unprivileged_role on server s9
+ drop cascades to user mapping for regress_unprivileged_role on server s9
RESET ROLE;
CREATE SERVER s9 FOREIGN DATA WRAPPER foo;
-GRANT USAGE ON FOREIGN SERVER s9 TO unprivileged_role;
-SET ROLE unprivileged_role;
+ERROR: Postgres-XL does not support SERVER yet
+DETAIL: The feature is not currently supported
+GRANT USAGE ON FOREIGN SERVER s9 TO regress_unprivileged_role;
+ERROR: server "s9" does not exist
+SET ROLE regress_unprivileged_role;
ALTER SERVER s9 VERSION '1.2'; -- ERROR
-ERROR: must be owner of foreign server s9
+ERROR: server "s9" does not exist
GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; -- WARNING
-WARNING: no privileges were granted for "s9"
+ERROR: server "s9" does not exist
CREATE USER MAPPING FOR current_user SERVER s9;
+ERROR: Postgres-XL does not support USER MAPPING yet
+DETAIL: The feature is not currently supported
DROP SERVER s9 CASCADE; -- ERROR
-ERROR: must be owner of foreign server s9
+ERROR: server "s9" does not exist
RESET ROLE;
-- Triggers
CREATE FUNCTION dummy_trigger() RETURNS TRIGGER AS $$
DETAIL: privileges for server s4
privileges for foreign-data wrapper foo
owner of user mapping for regress_test_role on server s6
- owner of user mapping for regress_test_role on server s5
- owner of server s5
- owner of server t2
- DROP SERVER s5 CASCADE;
- NOTICE: drop cascades to user mapping for regress_test_role on server s5
DROP SERVER t1 CASCADE;
NOTICE: drop cascades to user mapping for public on server t1
+DROP SERVER t2;
+ERROR: server "t2" does not exist
DROP USER MAPPING FOR regress_test_role SERVER s6;
+ERROR: role "regress_test_role" does not exist
-- This test causes some order dependent cascade detail output,
-- so switch to terse mode for it.
\set VERBOSITY terse
DROP FOREIGN DATA WRAPPER foo CASCADE;
-NOTICE: drop cascades to 5 other objects
+ERROR: foreign-data wrapper "foo" does not exist
\set VERBOSITY default
DROP SERVER s8 CASCADE;
NOTICE: drop cascades to 2 other objects
- DETAIL: drop cascades to user mapping for foreign_data_user on server s8
+ DETAIL: drop cascades to user mapping for regress_foreign_data_user on server s8
drop cascades to user mapping for public on server s8
DROP ROLE regress_test_indirect;
+ERROR: role "regress_test_indirect" does not exist
DROP ROLE regress_test_role;
-DROP ROLE regress_unprivileged_role; -- ERROR
-ERROR: role "regress_unprivileged_role" cannot be dropped because some objects depend on it
-DETAIL: privileges for foreign-data wrapper postgresql
-REVOKE ALL ON FOREIGN DATA WRAPPER postgresql FROM regress_unprivileged_role;
-DROP ROLE regress_unprivileged_role;
+ERROR: role "regress_test_role" does not exist
+DROP ROLE regress_unprivileged_role; -- ERROR
+REVOKE ALL ON FOREIGN DATA WRAPPER postgresql FROM regress_unprivileged_role;
+ERROR: foreign-data wrapper "postgresql" does not exist
+DROP ROLE regress_unprivileged_role;
+ERROR: role "regress_unprivileged_role" does not exist
DROP ROLE regress_test_role2;
DROP FOREIGN DATA WRAPPER postgresql CASCADE;
+ERROR: foreign-data wrapper "postgresql" does not exist
DROP FOREIGN DATA WRAPPER dummy CASCADE;
-NOTICE: drop cascades to server s0
+ERROR: foreign-data wrapper "dummy" does not exist
\c
- DROP ROLE foreign_data_user;
+ DROP ROLE regress_foreign_data_user;
-- At this point we should have no wrappers, no servers, and no mappings.
SELECT fdwname, fdwhandler, fdwvalidator, fdwoptions FROM pg_foreign_data_wrapper;
fdwname | fdwhandler | fdwvalidator | fdwoptions
insert into pp values(12);
insert into pp values(11);
update pp set f1=f1+1;
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
insert into cc values(13);
+ERROR: insert or update on table "cc" violates foreign key constraint "cc_f1_fkey"
+DETAIL: Key (f1)=(13) is not present in table "pp".
update pp set f1=f1+1; -- fail
-ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
-DETAIL: Key (f1)=(13) is still referenced from table "cc".
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
drop table pp, cc;
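The two planner errors above come from Postgres-XL's restriction that an UPDATE may not change a row's distribution column. A minimal sketch of the failing shape, assuming pp is hash-distributed on f1 (DISTRIBUTE BY HASH is Postgres-XL syntax; the delete-and-reinsert workaround is our assumption, not part of this test):

CREATE TABLE pp (f1 int PRIMARY KEY) DISTRIBUTE BY HASH (f1);
INSERT INTO pp VALUES (11), (12);
UPDATE pp SET f1 = f1 + 1;  -- ERROR: could not plan this distributed update
-- workaround sketch: move the key explicitly instead of updating it in place
BEGIN;
DELETE FROM pp WHERE f1 = 12;
INSERT INTO pp VALUES (13);
COMMIT;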
+ --
+ -- Test interaction of foreign-key optimization with rules (bug #14219)
+ --
+ create temp table t1 (a integer primary key, b text);
+ create temp table t2 (a integer primary key, b integer references t1);
+ create rule r1 as on delete to t1 do delete from t2 where t2.b = old.a;
+ explain (costs off) delete from t1 where a = 1;
+ QUERY PLAN
+ --------------------------------------------
+ Delete on t2
+ -> Nested Loop
+ -> Index Scan using t1_pkey on t1
+ Index Cond: (a = 1)
+ -> Seq Scan on t2
+ Filter: (b = 1)
+
+ Delete on t1
+ -> Index Scan using t1_pkey on t1
+ Index Cond: (a = 1)
+ (10 rows)
+
+ delete from t1 where a = 1;
-- Also test an index-only knn-search
explain (costs off)
select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5))
-order by p <-> point(0.2, 0.2);
- QUERY PLAN
---------------------------------------------------------
- Index Only Scan using gist_tbl_point_index on gist_tbl
- Index Cond: (p <@ '(0.5,0.5),(0,0)'::box)
- Order By: (p <-> '(0.201,0.201)'::point)
-(3 rows)
+order by p <-> point(0.201, 0.201);
+ QUERY PLAN
+--------------------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Index Only Scan using gist_tbl_point_index on gist_tbl
+ Index Cond: (p <@ '(0.5,0.5),(0,0)'::box)
+ Order By: (p <-> '(0.201,0.201)'::point)
+(4 rows)
select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5))
- order by p <-> point(0.2, 0.2);
+ order by p <-> point(0.201, 0.201);
p
-------------
(0.2,0.2)
-- Check commuted case as well
explain (costs off)
select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5))
-order by point(0.1, 0.1) <-> p;
- QUERY PLAN
---------------------------------------------------------
- Index Only Scan using gist_tbl_point_index on gist_tbl
- Index Cond: (p <@ '(0.5,0.5),(0,0)'::box)
- Order By: (p <-> '(0.101,0.101)'::point)
-(3 rows)
+order by point(0.101, 0.101) <-> p;
+ QUERY PLAN
+--------------------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Index Only Scan using gist_tbl_point_index on gist_tbl
+ Index Cond: (p <@ '(0.5,0.5),(0,0)'::box)
+ Order By: (p <-> '(0.101,0.101)'::point)
+(4 rows)
select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5))
- order by point(0.1, 0.1) <-> p;
+ order by point(0.101, 0.101) <-> p;
p
-------------
(0.1,0.1)
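Both knn plans assume a GiST index on gist_tbl.p; the index name is taken from the plans, while the table shape and data below are a hypothetical reconstruction for illustration only:

CREATE TABLE gist_tbl (p point);
INSERT INTO gist_tbl
  SELECT point(g * 0.1, g * 0.1) FROM generate_series(0, 5) g;
CREATE INDEX gist_tbl_point_index ON gist_tbl USING gist (p);
SET enable_seqscan = off;  -- nudge the planner toward the index-only path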
select ten, sum(distinct four) from onek a
group by grouping sets((ten,four),(ten))
having exists (select 1 from onek b where sum(distinct a.four) = b.four);
- ten | sum
------+-----
- 0 | 0
- 0 | 2
- 0 | 2
- 1 | 1
- 1 | 3
- 2 | 0
- 2 | 2
- 2 | 2
- 3 | 1
- 3 | 3
- 4 | 0
- 4 | 2
- 4 | 2
- 5 | 1
- 5 | 3
- 6 | 0
- 6 | 2
- 6 | 2
- 7 | 1
- 7 | 3
- 8 | 0
- 8 | 2
- 8 | 2
- 9 | 1
- 9 | 3
-(25 rows)
+ERROR: GROUPING SETS, ROLLUP or CUBE is not yet supported
+-- Tests around pushdown of HAVING clauses, partially testing against previous bugs
+select a,count(*) from gstest2 group by rollup(a) order by a;
+ERROR: GROUPING SETS, ROLLUP or CUBE is not yet supported
+select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
+ERROR: GROUPING SETS, ROLLUP or CUBE is not yet supported
+explain (costs off)
+ select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
+ERROR: GROUPING SETS, ROLLUP or CUBE is not yet supported
+select v.c, (select count(*) from gstest2 group by () having v.c)
+ from (values (false),(true)) v(c) order by v.c;
+ c | count
+---+-------
+ f |
+ t | 9
+(2 rows)
+
+explain (costs off)
+ select v.c, (select count(*) from gstest2 group by () having v.c)
+ from (values (false),(true)) v(c) order by v.c;
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Sort
+ Sort Key: "*VALUES*".column1
+ -> Values Scan on "*VALUES*"
+ SubPlan 1
+ -> Aggregate
+ Group Key: ()
+ Filter: "*VALUES*".column1
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Aggregate
+ -> Seq Scan on gstest2
+(10 rows)
+
+-- HAVING with GROUPING queries
+select ten, grouping(ten) from onek
+group by grouping sets(ten) having grouping(ten) >= 0
+order by 2,1;
+ERROR: GROUPING SETS, ROLLUP or CUBE is not yet supported
+select ten, grouping(ten) from onek
+group by grouping sets(ten, four) having grouping(ten) > 0
+order by 2,1;
+ERROR: GROUPING SETS, ROLLUP or CUBE is not yet supported
+select ten, grouping(ten) from onek
+group by rollup(ten) having grouping(ten) > 0
+order by 2,1;
+ERROR: GROUPING SETS, ROLLUP or CUBE is not yet supported
+select ten, grouping(ten) from onek
+group by cube(ten) having grouping(ten) > 0
+order by 2,1;
+ERROR: GROUPING SETS, ROLLUP or CUBE is not yet supported
++select ten, grouping(ten) from onek
++group by (ten) having grouping(ten) >= 0
++order by 2,1;
++ ten | grouping
++-----+----------
++ 0 | 0
++ 1 | 0
++ 2 | 0
++ 3 | 0
++ 4 | 0
++ 5 | 0
++ 6 | 0
++ 7 | 0
++ 8 | 0
++ 9 | 0
++(10 rows)
+
+ -- Tests around pushdown of HAVING clauses, partially testing against previous bugs
+ select a,count(*) from gstest2 group by rollup(a) order by a;
+ a | count
+ ---+-------
+ 1 | 8
+ 2 | 1
+ | 9
+ (3 rows)
+
+ select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
+ a | count
+ ---+-------
+ 2 | 1
+ | 9
+ (2 rows)
+
+ explain (costs off)
+ select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
+ QUERY PLAN
+ ----------------------------------
+ GroupAggregate
+ Group Key: a
+ Group Key: ()
+ Filter: (a IS DISTINCT FROM 1)
+ -> Sort
+ Sort Key: a
+ -> Seq Scan on gstest2
+ (7 rows)
+
+ select v.c, (select count(*) from gstest2 group by () having v.c)
+ from (values (false),(true)) v(c) order by v.c;
+ c | count
+ ---+-------
+ f |
+ t | 9
+ (2 rows)
+
+ explain (costs off)
+ select v.c, (select count(*) from gstest2 group by () having v.c)
+ from (values (false),(true)) v(c) order by v.c;
+ QUERY PLAN
+ -----------------------------------------------------------
+ Sort
+ Sort Key: "*VALUES*".column1
+ -> Values Scan on "*VALUES*"
+ SubPlan 1
+ -> Aggregate
+ Group Key: ()
+ Filter: "*VALUES*".column1
+ -> Result
+ One-Time Filter: "*VALUES*".column1
+ -> Seq Scan on gstest2
+ (10 rows)
+
+ -- HAVING with GROUPING queries
+ select ten, grouping(ten) from onek
+ group by grouping sets(ten) having grouping(ten) >= 0
+ order by 2,1;
+ ten | grouping
+ -----+----------
+ 0 | 0
+ 1 | 0
+ 2 | 0
+ 3 | 0
+ 4 | 0
+ 5 | 0
+ 6 | 0
+ 7 | 0
+ 8 | 0
+ 9 | 0
+ (10 rows)
+
+ select ten, grouping(ten) from onek
+ group by grouping sets(ten, four) having grouping(ten) > 0
+ order by 2,1;
+ ten | grouping
+ -----+----------
+ | 1
+ | 1
+ | 1
+ | 1
+ (4 rows)
+
+ select ten, grouping(ten) from onek
+ group by rollup(ten) having grouping(ten) > 0
+ order by 2,1;
+ ten | grouping
+ -----+----------
+ | 1
+ (1 row)
+
+ select ten, grouping(ten) from onek
+ group by cube(ten) having grouping(ten) > 0
+ order by 2,1;
+ ten | grouping
+ -----+----------
+ | 1
+ (1 row)
+
select ten, grouping(ten) from onek
group by (ten) having grouping(ten) >= 0
order by 2,1;
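Where this Postgres-XL version rejects GROUPING SETS, ROLLUP and CUBE, the same rows can be produced with plain GROUP BY plus UNION ALL. A sketch equivalent to group by rollup(a) on gstest2 (standard SQL; assumes a itself is never NULL, since NULL stands in for the rolled-up grand total):

SELECT a, count(*) FROM gstest2 GROUP BY a
UNION ALL
SELECT NULL, count(*) FROM gstest2
ORDER BY 1;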
select * from
tenk1, int8_tbl a, int8_tbl b
where thousand = a.q1 and tenthous = b.q1 and a.q2 = 1 and b.q2 = 2;
- QUERY PLAN
----------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Merge Join
+ Merge Cond: (tenk1.thousand = a.q1)
+ -> Sort
+ Sort Key: tenk1.thousand
+ -> Merge Join
+ Merge Cond: (tenk1.tenthous = b.q1)
+ -> Sort
+ Sort Key: tenk1.tenthous
+ -> Seq Scan on tenk1
+ -> Sort
+ Sort Key: b.q1
+ -> Seq Scan on int8_tbl b
+ Filter: (q2 = 2)
+ -> Sort
+ Sort Key: a.q1
+ -> Seq Scan on int8_tbl a
+ Filter: (q2 = 1)
+(19 rows)
+
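The plan above ships the entire three-way join to the datanodes as a single statement ("Remote Fast Query Execution"). To compare against a coordinator-driven plan, fast query shipping can be turned off; enable_fast_query_shipping is, to the best of our knowledge, the Postgres-XC/XL GUC that controls this:

SET enable_fast_query_shipping = off;
EXPLAIN (costs off)
SELECT * FROM tenk1, int8_tbl a, int8_tbl b
 WHERE thousand = a.q1 AND tenthous = b.q1 AND a.q2 = 1 AND b.q2 = 2;
-- expect Remote Subquery Scan nodes under a coordinator join
-- instead of one Remote Fast Query Execution node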
+--
+-- test a corner case in which we shouldn't apply the star-schema optimization
+--
+explain (costs off, nodes off)
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (values(1,0)) v1(x1,x2)
+ left join (values(3,1)) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+ QUERY PLAN
+-----------------------------------------------------------------------------------
Nested Loop
- -> Seq Scan on int8_tbl b
- Filter: (q2 = 2)
+ Join Filter: (t1.stringu1 > t2.stringu2)
-> Nested Loop
- -> Seq Scan on int8_tbl a
- Filter: (q2 = 1)
- -> Index Scan using tenk1_thous_tenthous on tenk1
- Index Cond: ((thousand = a.q1) AND (tenthous = b.q1))
-(8 rows)
+ Join Filter: ((0) = i1.f1)
+ -> Nested Loop
+ -> Nested Loop
+ Join Filter: ((1) = (1))
+ -> Result
+ -> Result
+ -> Materialize
+ -> Remote Subquery Scan on all
+ -> Index Scan using tenk1_unique2 on tenk1 t1
+ Index Cond: ((unique2 = (11)) AND (unique2 < 42))
+ -> Materialize
+ -> Remote Subquery Scan on all
+ -> Seq Scan on int4_tbl i1
+ -> Materialize
+ -> Remote Subquery Scan on all
+ -> Index Scan using tenk1_unique1 on tenk1 t2
+ Index Cond: (unique1 = (3))
+(20 rows)
+
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (values(1,0)) v1(x1,x2)
+ left join (values(3,1)) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+ unique2 | stringu1 | unique1 | stringu2
+---------+----------+---------+----------
+ 11 | WFAAAA | 3 | LKIAAA
+(1 row)
+
+-- variant that isn't quite a star-schema case
+select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+where t1.unique1 < i4.f1;
+ d1
+----
+(0 rows)
+ --
+ -- test a corner case in which we shouldn't apply the star-schema optimization
+ --
+ explain (costs off)
+ select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (values(1,0)) v1(x1,x2)
+ left join (values(3,1)) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+ QUERY PLAN
+ -----------------------------------------------------------------------
+ Nested Loop
+ Join Filter: (t1.stringu1 > t2.stringu2)
+ -> Nested Loop
+ Join Filter: ((0) = i1.f1)
+ -> Nested Loop
+ -> Nested Loop
+ Join Filter: ((1) = (1))
+ -> Result
+ -> Result
+ -> Index Scan using tenk1_unique2 on tenk1 t1
+ Index Cond: ((unique2 = (11)) AND (unique2 < 42))
+ -> Seq Scan on int4_tbl i1
+ -> Index Scan using tenk1_unique1 on tenk1 t2
+ Index Cond: (unique1 = (3))
+ (14 rows)
+
+ select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (values(1,0)) v1(x1,x2)
+ left join (values(3,1)) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+ unique2 | stringu1 | unique1 | stringu2
+ ---------+----------+---------+----------
+ 11 | WFAAAA | 3 | LKIAAA
+ (1 row)
+
+ -- variant that isn't quite a star-schema case
+ select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+ where t1.unique1 < i4.f1;
+ d1
+ ----
+ (0 rows)
+
--
-- test extraction of restriction OR clauses from join OR clause
-- (we used to only do this for indexable clauses)
explain (costs off)
select d.* from d left join (select distinct * from b) s
on d.a = s.id and d.b = s.c_id;
- QUERY PLAN
----------------
- Seq Scan on d
-(1 row)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on d
+(2 rows)
-- join removal is not possible when the GROUP BY contains a column that is
- -- not in the join condition
+ -- not in the join condition. (Note: as of 9.6, we notice that b.id is a
+ -- primary key and so drop b.c_id from the GROUP BY of the resulting plan;
+ -- but this happens too late for join removal in the outer plan level.)
explain (costs off)
select d.* from d left join (select * from b group by b.id, b.c_id) s
on d.a = s.id;
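The join-removal hunks refer to tables b and d whose definitions fall outside this diff; a hypothetical minimal shape consistent with the plans shown (the exact column list is a guess, only the b.id PRIMARY KEY matters):

CREATE TEMP TABLE b (id int PRIMARY KEY, c_id int);
CREATE TEMP TABLE d (a int, b int);
-- SELECT DISTINCT * FROM b yields unique (id, c_id) pairs, so the left join
-- can neither duplicate nor filter d's rows; with its output unused, the
-- join is removed and only the Seq Scan on d remains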
---+------------------+-------------------+------------------
1 | 123 | 456 | 123
1 | 123 | 4567890123456789 | 123
+ 1 | 4567890123456789 | -4567890123456789 | 4567890123456789
1 | 4567890123456789 | 123 | 42
1 | 4567890123456789 | 4567890123456789 | 4567890123456789
- 1 | 4567890123456789 | -4567890123456789 | 4567890123456789
(5 rows)
+ rollback;
+ -- another join removal bug: we must clean up correctly when removing a PHV
+ begin;
+ create temp table uniquetbl (f1 text unique);
+ explain (costs off)
+ select t1.* from
+ uniquetbl as t1
+ left join (select *, '***'::text as d1 from uniquetbl) t2
+ on t1.f1 = t2.f1
+ left join uniquetbl t3
+ on t2.d1 = t3.f1;
+ QUERY PLAN
+ --------------------------
+ Seq Scan on uniquetbl t1
+ (1 row)
+
+ explain (costs off)
+ select t0.*
+ from
+ text_tbl t0
+ left join
+ (select case t1.ten when 0 then 'doh!'::text else null::text end as case1,
+ t1.stringu2
+ from tenk1 t1
+ join int4_tbl i4 ON i4.f1 = t1.unique2
+ left join uniquetbl u1 ON u1.f1 = t1.string4) ss
+ on t0.f1 = ss.case1
+ where ss.stringu2 !~* ss.case1;
+ QUERY PLAN
+ --------------------------------------------------------------------------------------------
+ Nested Loop
+ Join Filter: (CASE t1.ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END = t0.f1)
+ -> Nested Loop
+ -> Seq Scan on int4_tbl i4
+ -> Index Scan using tenk1_unique2 on tenk1 t1
+ Index Cond: (unique2 = i4.f1)
+ Filter: (stringu2 !~* CASE ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END)
+ -> Materialize
+ -> Seq Scan on text_tbl t0
+ (9 rows)
+
+ select t0.*
+ from
+ text_tbl t0
+ left join
+ (select case t1.ten when 0 then 'doh!'::text else null::text end as case1,
+ t1.stringu2
+ from tenk1 t1
+ join int4_tbl i4 ON i4.f1 = t1.unique2
+ left join uniquetbl u1 ON u1.f1 = t1.string4) ss
+ on t0.f1 = ss.case1
+ where ss.stringu2 !~* ss.case1;
+ f1
+ ------
+ doh!
+ (1 row)
+
rollback;
-- bug #8444: we've historically allowed duplicate aliases within aliased JOINs
select * from
-- create a materialized view with no data, and confirm correct behavior
EXPLAIN (costs off)
- CREATE MATERIALIZED VIEW tm AS SELECT type, sum(amt) AS totamt FROM t GROUP BY type WITH NO DATA;
- QUERY PLAN
-----------------------------
+ CREATE MATERIALIZED VIEW mvtest_tm AS SELECT type, sum(amt) AS totamt FROM mvtest_t GROUP BY type WITH NO DATA;
+ QUERY PLAN
+-----------------------------------------------------------
HashAggregate
Group Key: type
- -> Seq Scan on mvtest_t
-(3 rows)
-
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> HashAggregate
+ Group Key: type
+ -> Seq Scan on mvtest_t
+(6 rows)
-
- CREATE MATERIALIZED VIEW tm AS SELECT type, sum(amt) AS totamt FROM t GROUP BY type WITH NO DATA;
- SELECT relispopulated FROM pg_class WHERE oid = 'tm'::regclass;
+ CREATE MATERIALIZED VIEW mvtest_tm AS SELECT type, sum(amt) AS totamt FROM mvtest_t GROUP BY type WITH NO DATA;
+ SELECT relispopulated FROM pg_class WHERE oid = 'mvtest_tm'::regclass;
relispopulated
----------------
f
-- create various views
EXPLAIN (costs off)
- CREATE MATERIALIZED VIEW tvm AS SELECT * FROM tv ORDER BY type;
- QUERY PLAN
-----------------------------------
+ CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type;
+ QUERY PLAN
+-----------------------------------------------------------------
Sort
- Sort Key: t.type
+ Sort Key: mvtest_t.type
-> HashAggregate
- Group Key: mvtest_t.type
- -> Seq Scan on mvtest_t
-(5 rows)
+ Group Key: mvtest_t.type
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> HashAggregate
+ Group Key: mvtest_t.type
+ -> Seq Scan on mvtest_t
+(8 rows)
- CREATE MATERIALIZED VIEW tvm AS SELECT * FROM tv ORDER BY type;
- SELECT * FROM tvm;
+ CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type;
+ SELECT * FROM mvtest_tvm;
type | totamt
------+--------
x | 5
z | 11
(3 rows)
- CREATE MATERIALIZED VIEW tmm AS SELECT sum(totamt) AS grandtot FROM tm;
- CREATE MATERIALIZED VIEW tvmm AS SELECT sum(totamt) AS grandtot FROM tvm;
- CREATE UNIQUE INDEX tvmm_expr ON tvmm ((grandtot > 0));
- CREATE UNIQUE INDEX tvmm_pred ON tvmm (grandtot) WHERE grandtot < 0;
- CREATE VIEW tvv AS SELECT sum(totamt) AS grandtot FROM tv;
+ CREATE MATERIALIZED VIEW mvtest_tmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tm;
+ CREATE MATERIALIZED VIEW mvtest_tvmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tvm;
+ CREATE UNIQUE INDEX mvtest_tvmm_expr ON mvtest_tvmm ((grandtot > 0));
+ CREATE UNIQUE INDEX mvtest_tvmm_pred ON mvtest_tvmm (grandtot) WHERE grandtot < 0;
+ CREATE VIEW mvtest_tvv AS SELECT sum(totamt) AS grandtot FROM mvtest_tv;
EXPLAIN (costs off)
- CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv;
- QUERY PLAN
-----------------------------------
+ CREATE MATERIALIZED VIEW tvvm AS SELECT * FROM tvv;
+ QUERY PLAN
+-----------------------------------------------------------------
Aggregate
-> HashAggregate
- Group Key: mvtest_t.type
- -> Seq Scan on mvtest_t
-(4 rows)
+ Group Key: t.type
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> HashAggregate
+ Group Key: t.type
+ -> Seq Scan on t
+(7 rows)
- CREATE MATERIALIZED VIEW tvvm AS SELECT * FROM tvv;
- CREATE VIEW tvvmv AS SELECT * FROM tvvm;
- CREATE MATERIALIZED VIEW bb AS SELECT * FROM tvvmv;
- CREATE INDEX aa ON bb (grandtot);
+ CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv;
+ CREATE VIEW mvtest_tvvmv AS SELECT * FROM mvtest_tvvm;
+ CREATE MATERIALIZED VIEW mvtest_bb AS SELECT * FROM mvtest_tvvmv;
+ CREATE INDEX mvtest_aa ON mvtest_bb (grandtot);
-- check that plans seem reasonable
- \d+ tvm
- Materialized view "public.tvm"
+ \d+ mvtest_tvm
+ Materialized view "public.mvtest_tvm"
Column | Type | Modifiers | Storage | Stats target | Description
--------+---------+-----------+----------+--------------+-------------
type | text | | extended | |
------+--------
x | 5
y | 12
- z | 24
+ z | 11
(3 rows)
- SELECT * FROM tvm ORDER BY type;
+ SELECT * FROM mvtest_tvm ORDER BY type;
type | totamt
------+--------
x | 5
(1 row)
EXPLAIN (costs off)
- SELECT * FROM tvvm;
- QUERY PLAN
- ------------------
- Seq Scan on tvvm
+ SELECT * FROM mvtest_tvvm;
+ QUERY PLAN
+ -------------------------
+ Seq Scan on mvtest_tvvm
(1 row)
- SELECT * FROM tmm;
+ SELECT * FROM mvtest_tmm;
grandtot
----------
- 41
+ 28
(1 row)
- SELECT * FROM tvmm;
+ SELECT * FROM mvtest_tvmm;
grandtot
----------
41
---
(0 rows)
- DROP TABLE hoge CASCADE;
- NOTICE: drop cascades to materialized view hogeview
+ DROP TABLE mvtest_huge CASCADE;
+ NOTICE: drop cascades to materialized view mvtest_hugeview
-- test that duplicate values on unique index prevent refresh
- CREATE TABLE foo(a, b) AS VALUES(1, 10);
- CREATE MATERIALIZED VIEW mv AS SELECT * FROM foo;
- CREATE UNIQUE INDEX ON mv(a);
- INSERT INTO foo SELECT * FROM foo;
- REFRESH MATERIALIZED VIEW mv;
- ERROR: could not create unique index "mv_a_idx"
+ CREATE TABLE mvtest_foo(a, b) AS VALUES(1, 10);
+ CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo;
+ CREATE UNIQUE INDEX ON mvtest_mv(a);
+ INSERT INTO mvtest_foo SELECT * FROM mvtest_foo;
+ REFRESH MATERIALIZED VIEW mvtest_mv;
+ ERROR: could not create unique index "mvtest_mv_a_idx"
DETAIL: Key (a)=(1) is duplicated.
-REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv;
-ERROR: new data for materialized view "mvtest_mv" contains duplicate rows without any null columns
-DETAIL: Row: (1,10)
-DROP TABLE mvtest_foo CASCADE;
-NOTICE: drop cascades to materialized view mvtest_mv
+DROP TABLE foo CASCADE;
+NOTICE: drop cascades to materialized view mv
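A failed REFRESH rolls back, leaving the old matview contents in place; the durable fix is to define the matview so the indexed column is unique by construction. A sketch with hypothetical names, independent of the test tables:

    CREATE TABLE src (a int, b int);
    INSERT INTO src VALUES (1, 10), (1, 20);      -- duplicate keys in the source
    CREATE MATERIALIZED VIEW mv_dedup AS
      SELECT a, min(b) AS b FROM src GROUP BY a;  -- one row per a by construction
    CREATE UNIQUE INDEX ON mv_dedup (a);
    REFRESH MATERIALIZED VIEW mv_dedup;           -- the rebuild can no longer collide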
-- make sure that the case where all columns are covered by unique indexes works
-CREATE TABLE mvtest_foo(a, b, c) AS VALUES(1, 2, 3);
-CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo;
-CREATE UNIQUE INDEX ON mvtest_mv (a);
-CREATE UNIQUE INDEX ON mvtest_mv (b);
-CREATE UNIQUE INDEX on mvtest_mv (c);
-INSERT INTO mvtest_foo VALUES(2, 3, 4);
-INSERT INTO mvtest_foo VALUES(3, 4, 5);
-REFRESH MATERIALIZED VIEW mvtest_mv;
-REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv;
-DROP TABLE mvtest_foo CASCADE;
-NOTICE: drop cascades to materialized view mvtest_mv
+CREATE TABLE foo(a, b, c) AS VALUES(1, 2, 3);
+CREATE MATERIALIZED VIEW mv AS SELECT * FROM foo;
+CREATE UNIQUE INDEX ON mv (a);
+CREATE UNIQUE INDEX ON mv (b);
+CREATE UNIQUE INDEX on mv (c);
+INSERT INTO foo VALUES(2, 3, 4);
+INSERT INTO foo VALUES(3, 4, 5);
+REFRESH MATERIALIZED VIEW mv;
+DROP TABLE foo CASCADE;
+NOTICE: drop cascades to materialized view mv
-- allow subquery to reference unpopulated matview if WITH NO DATA is specified
- CREATE MATERIALIZED VIEW mv1 AS SELECT 1 AS col1 WITH NO DATA;
- CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM mv1
- WHERE col1 = (SELECT LEAST(col1) FROM mv1) WITH NO DATA;
- DROP MATERIALIZED VIEW mv1 CASCADE;
- NOTICE: drop cascades to materialized view mv2
+ CREATE MATERIALIZED VIEW mvtest_mv1 AS SELECT 1 AS col1 WITH NO DATA;
+ CREATE MATERIALIZED VIEW mvtest_mv2 AS SELECT * FROM mvtest_mv1
+ WHERE col1 = (SELECT LEAST(col1) FROM mvtest_mv1) WITH NO DATA;
+ DROP MATERIALIZED VIEW mvtest_mv1 CASCADE;
+ NOTICE: drop cascades to materialized view mvtest_mv2
-- make sure that types with unusual equality tests work
- CREATE TABLE boxes (id serial primary key, b box);
- INSERT INTO boxes (b) VALUES
+ CREATE TABLE mvtest_boxes (id serial primary key, b box);
+ INSERT INTO mvtest_boxes (b) VALUES
('(32,32),(31,31)'),
('(2.0000004,2.0000004),(1,1)'),
('(1.9999996,1.9999996),(1,1)');
3 | (1.9999996,1.9999996),(1,1)
(3 rows)
- DROP TABLE boxes CASCADE;
- NOTICE: drop cascades to materialized view boxmv
+ DROP TABLE mvtest_boxes CASCADE;
+ NOTICE: drop cascades to materialized view mvtest_boxmv
-- make sure that column names are handled correctly
-CREATE TABLE mvtest_v (i int, j int);
-CREATE MATERIALIZED VIEW mvtest_mv_v (ii, jj, kk) AS SELECT i, j FROM mvtest_v; -- error
-ERROR: too many column names were specified
-CREATE MATERIALIZED VIEW mvtest_mv_v (ii, jj) AS SELECT i, j FROM mvtest_v; -- ok
-CREATE MATERIALIZED VIEW mvtest_mv_v_2 (ii) AS SELECT i, j FROM mvtest_v; -- ok
-CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj, kk) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- error
-ERROR: too many column names were specified
-CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok
-CREATE MATERIALIZED VIEW mvtest_mv_v_4 (ii) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok
-ALTER TABLE mvtest_v RENAME COLUMN i TO x;
-INSERT INTO mvtest_v values (1, 2);
-CREATE UNIQUE INDEX mvtest_mv_v_ii ON mvtest_mv_v (ii);
-REFRESH MATERIALIZED VIEW mvtest_mv_v;
-UPDATE mvtest_v SET j = 3 WHERE x = 1;
-REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_v;
-REFRESH MATERIALIZED VIEW mvtest_mv_v_2;
-REFRESH MATERIALIZED VIEW mvtest_mv_v_3;
-REFRESH MATERIALIZED VIEW mvtest_mv_v_4;
-SELECT * FROM mvtest_v;
+CREATE TABLE v (i int, j int);
+CREATE MATERIALIZED VIEW mv_v (ii) AS SELECT i, j AS jj FROM v;
+ALTER TABLE v RENAME COLUMN i TO x;
+INSERT INTO v values (1, 2);
+CREATE UNIQUE INDEX mv_v_ii ON mv_v (ii);
+REFRESH MATERIALIZED VIEW mv_v;
+UPDATE v SET j = 3 WHERE x = 1;
+SELECT * FROM v;
x | j
---+---
1 | 3
(1 row)
- SELECT * FROM mv_v;
+ SELECT * FROM mvtest_mv_v;
ii | jj
----+----
- 1 | 3
+ 1 | 2
(1 row)
- DROP TABLE v CASCADE;
- NOTICE: drop cascades to materialized view mv_v
+ SELECT * FROM mvtest_mv_v_2;
+ ii | j
+ ----+---
+ 1 | 3
+ (1 row)
+
+ SELECT * FROM mvtest_mv_v_3;
+ ii | jj
+ ----+----
+ 1 | 3
+ (1 row)
+
+ SELECT * FROM mvtest_mv_v_4;
+ ii | j
+ ----+---
+ 1 | 3
+ (1 row)
+
+ DROP TABLE mvtest_v CASCADE;
+ NOTICE: drop cascades to 4 other objects
+ DETAIL: drop cascades to materialized view mvtest_mv_v
+ drop cascades to materialized view mvtest_mv_v_2
+ drop cascades to materialized view mvtest_mv_v_3
+ drop cascades to materialized view mvtest_mv_v_4
+ -- make sure that create WITH NO DATA does not plan the query (bug #13907)
+ create materialized view mvtest_error as select 1/0 as x; -- fail
+ ERROR: division by zero
+ create materialized view mvtest_error as select 1/0 as x with no data;
+ refresh materialized view mvtest_error; -- fail here
+ ERROR: division by zero
+ drop materialized view mvtest_error;
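That sequence is the substance of bug #13907: with WITH NO DATA the defining query is neither planned nor executed, so even a constant 1/0 is not evaluated until the first REFRESH. An unpopulated matview can be created and inspected but not scanned; a short sketch (hypothetical name):

    CREATE MATERIALIZED VIEW m_empty AS SELECT 1 AS x WITH NO DATA;
    SELECT relispopulated FROM pg_class WHERE oid = 'm_empty'::regclass;  -- f
    SELECT * FROM m_empty;   -- ERROR: materialized view "m_empty" has not been populated
    REFRESH MATERIALIZED VIEW m_empty;   -- populates it; SELECT works afterwards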
-- make sure that matview rows can be referenced as source rows (bug #9398)
-CREATE TABLE mvtest_v AS SELECT generate_series(1,10) AS a;
-CREATE MATERIALIZED VIEW mvtest_mv_v AS SELECT a FROM mvtest_v WHERE a <= 5;
-DELETE FROM mvtest_v WHERE EXISTS ( SELECT * FROM mvtest_mv_v WHERE mvtest_mv_v.a = mvtest_v.a );
-SELECT * FROM mvtest_v;
+CREATE TABLE v AS SELECT generate_series(1,10) AS a;
+CREATE MATERIALIZED VIEW mv_v AS SELECT a FROM v WHERE a <= 5;
+DELETE FROM v WHERE EXISTS ( SELECT * FROM mv_v WHERE mv_v.a = v.a );
+ERROR: could not plan this distributed delete
+DETAIL: correlated or complex DELETE is currently not supported in Postgres-XL.
+SELECT * FROM v;
a
----
+ 1
+ 2
+ 5
6
- 7
8
9
+ 3
+ 4
+ 7
10
-(5 rows)
+(10 rows)
- SELECT * FROM mv_v;
+ SELECT * FROM mvtest_mv_v;
a
---
1
2
+ 5
3
4
- 5
(5 rows)
- DROP TABLE v CASCADE;
- NOTICE: drop cascades to materialized view mv_v
+ DROP TABLE mvtest_v CASCADE;
+ NOTICE: drop cascades to materialized view mvtest_mv_v
-- make sure running as superuser works when MV owned by another role (bug #11208)
- CREATE ROLE user_dw;
- SET ROLE user_dw;
- CREATE TABLE foo_data AS SELECT i, md5(random()::text)
+ CREATE ROLE regress_user_mvtest;
+ SET ROLE regress_user_mvtest;
+ CREATE TABLE mvtest_foo_data AS SELECT i, md5(random()::text)
FROM generate_series(1, 10) i;
- CREATE MATERIALIZED VIEW mv_foo AS SELECT * FROM foo_data;
- CREATE MATERIALIZED VIEW mv_foo AS SELECT * FROM foo_data;
- ERROR: relation "mv_foo" already exists
- CREATE MATERIALIZED VIEW IF NOT EXISTS mv_foo AS SELECT * FROM foo_data;
- NOTICE: relation "mv_foo" already exists, skipping
- CREATE UNIQUE INDEX ON mv_foo (i);
+ CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ ERROR: relation "mvtest_mv_foo" already exists
+ CREATE MATERIALIZED VIEW IF NOT EXISTS mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ NOTICE: relation "mvtest_mv_foo" already exists, skipping
+ CREATE UNIQUE INDEX ON mvtest_mv_foo (i);
RESET ROLE;
-REFRESH MATERIALIZED VIEW mvtest_mv_foo;
-REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_foo;
-DROP OWNED BY regress_user_mvtest CASCADE;
-DROP ROLE regress_user_mvtest;
--- make sure that create WITH NO DATA works via SPI
-BEGIN;
-CREATE FUNCTION mvtest_func()
- RETURNS void AS $$
-BEGIN
- CREATE MATERIALIZED VIEW mvtest1 AS SELECT 1 AS x;
- CREATE MATERIALIZED VIEW mvtest2 AS SELECT 1 AS x WITH NO DATA;
-END;
-$$ LANGUAGE plpgsql;
-SELECT mvtest_func();
- mvtest_func
--------------
-
-(1 row)
-
-SELECT * FROM mvtest1;
- x
----
- 1
-(1 row)
-
-SELECT * FROM mvtest2;
-ERROR: materialized view "mvtest2" has not been populated
-HINT: Use the REFRESH MATERIALIZED VIEW command.
-ROLLBACK;
+REFRESH MATERIALIZED VIEW mv_foo;
+DROP OWNED BY user_dw CASCADE;
+DROP ROLE user_dw;
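The point of bug #11208 is ownership: REFRESH executes the matview's defining query as the matview's owner, so a superuser must be able to refresh a view it does not own without tripping over permission checks. A sketch of that shape, with hypothetical names:

    CREATE ROLE mv_owner;
    SET ROLE mv_owner;
    CREATE TABLE owner_t AS SELECT 1 AS i;
    CREATE MATERIALIZED VIEW owner_mv AS SELECT * FROM owner_t;
    RESET ROLE;
    REFRESH MATERIALIZED VIEW owner_mv;  -- runs as mv_owner, not as the superuser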
---
--- Cleanup resources
---
+ SET client_min_messages TO 'warning';
DROP FOREIGN DATA WRAPPER addr_fdw CASCADE;
+ERROR: foreign-data wrapper "addr_fdw" does not exist
DROP SCHEMA addr_nsp CASCADE;
- DROP OWNED BY regtest_addr_user;
- DROP USER regtest_addr_user;
+ DROP OWNED BY regress_addr_user;
+ DROP USER regress_addr_user;
insert into foo values(x);
return x;
end$$ language plpgsql;
- set statement_timeout to 2000;
- select blockme();
- NOTICE: nyeah nyeah, can't stop me
- blockme
- ---------
- 20
+ select subxact_rollback_semantics();
+ subxact_rollback_semantics
+ ----------------------------
+ 20
(1 row)
-select * from foo;
+reset statement_timeout;
+select * from foo order by 1;
f1
----
1
"test_replica_identity_hash" hash (nonkey)
"test_replica_identity_keyab" btree (keya, keyb)
Replica Identity: FULL
+Distribute By: REPLICATION
+Location Nodes: ALL DATANODES
+ Has OIDs: yes
ALTER TABLE test_replica_identity REPLICA IDENTITY NOTHING;
SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass;
-- default for superuser is false
+CREATE ROLE test_def_superuser;
+SELECT * FROM pg_authid WHERE rolname = 'test_def_superuser';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_def_superuser | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+CREATE ROLE test_superuser WITH SUPERUSER;
+SELECT * FROM pg_authid WHERE rolname = 'test_superuser';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_superuser | t | t | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_superuser WITH NOSUPERUSER;
+SELECT * FROM pg_authid WHERE rolname = 'test_superuser';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_superuser | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_superuser WITH SUPERUSER;
+SELECT * FROM pg_authid WHERE rolname = 'test_superuser';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_superuser | t | t | f | f | f | f | f | -1 | |
+(1 row)
+
+-- default for inherit is true
+CREATE ROLE test_def_inherit;
+SELECT * FROM pg_authid WHERE rolname = 'test_def_inherit';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_def_inherit | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+CREATE ROLE test_inherit WITH NOINHERIT;
+SELECT * FROM pg_authid WHERE rolname = 'test_inherit';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_inherit | f | f | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_inherit WITH INHERIT;
+SELECT * FROM pg_authid WHERE rolname = 'test_inherit';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_inherit | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_inherit WITH NOINHERIT;
+SELECT * FROM pg_authid WHERE rolname = 'test_inherit';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_inherit | f | f | f | f | f | f | f | -1 | |
+(1 row)
+
+-- default for create role is false
+CREATE ROLE test_def_createrole;
+SELECT * FROM pg_authid WHERE rolname = 'test_def_createrole';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+---------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_def_createrole | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+CREATE ROLE test_createrole WITH CREATEROLE;
+SELECT * FROM pg_authid WHERE rolname = 'test_createrole';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_createrole | f | t | t | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_createrole WITH NOCREATEROLE;
+SELECT * FROM pg_authid WHERE rolname = 'test_createrole';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_createrole | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_createrole WITH CREATEROLE;
+SELECT * FROM pg_authid WHERE rolname = 'test_createrole';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_createrole | f | t | t | f | f | f | f | -1 | |
+(1 row)
+
+-- default for create database is false
+CREATE ROLE test_def_createdb;
+SELECT * FROM pg_authid WHERE rolname = 'test_def_createdb';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+-------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_def_createdb | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+CREATE ROLE test_createdb WITH CREATEDB;
+SELECT * FROM pg_authid WHERE rolname = 'test_createdb';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+---------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_createdb | f | t | f | t | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_createdb WITH NOCREATEDB;
+SELECT * FROM pg_authid WHERE rolname = 'test_createdb';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+---------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_createdb | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_createdb WITH CREATEDB;
+SELECT * FROM pg_authid WHERE rolname = 'test_createdb';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+---------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_createdb | f | t | f | t | f | f | f | -1 | |
+(1 row)
+
+-- default for can login is false for role
+CREATE ROLE test_def_role_canlogin;
+SELECT * FROM pg_authid WHERE rolname = 'test_def_role_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_def_role_canlogin | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+CREATE ROLE test_role_canlogin WITH LOGIN;
+SELECT * FROM pg_authid WHERE rolname = 'test_role_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_role_canlogin | f | t | f | f | t | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_role_canlogin WITH NOLOGIN;
+SELECT * FROM pg_authid WHERE rolname = 'test_role_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_role_canlogin | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_role_canlogin WITH LOGIN;
+SELECT * FROM pg_authid WHERE rolname = 'test_role_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_role_canlogin | f | t | f | f | t | f | f | -1 | |
+(1 row)
+
+-- default for can login is true for user
+CREATE USER test_def_user_canlogin;
+SELECT * FROM pg_authid WHERE rolname = 'test_def_user_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_def_user_canlogin | f | t | f | f | t | f | f | -1 | |
+(1 row)
+
+CREATE USER test_user_canlogin WITH NOLOGIN;
+SELECT * FROM pg_authid WHERE rolname = 'test_user_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_user_canlogin | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER USER test_user_canlogin WITH LOGIN;
+SELECT * FROM pg_authid WHERE rolname = 'test_user_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_user_canlogin | f | t | f | f | t | f | f | -1 | |
+(1 row)
+
+ALTER USER test_user_canlogin WITH NOLOGIN;
+SELECT * FROM pg_authid WHERE rolname = 'test_user_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_user_canlogin | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+-- default for replication is false
+CREATE ROLE test_def_replication;
+SELECT * FROM pg_authid WHERE rolname = 'test_def_replication';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_def_replication | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+CREATE ROLE test_replication WITH REPLICATION;
+SELECT * FROM pg_authid WHERE rolname = 'test_replication';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_replication | f | t | f | f | f | t | f | -1 | |
+(1 row)
+
+ALTER ROLE test_replication WITH NOREPLICATION;
+SELECT * FROM pg_authid WHERE rolname = 'test_replication';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_replication | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_replication WITH REPLICATION;
+SELECT * FROM pg_authid WHERE rolname = 'test_replication';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_replication | f | t | f | f | f | t | f | -1 | |
+(1 row)
+
+-- default for bypassrls is false
+CREATE ROLE test_def_bypassrls;
+SELECT * FROM pg_authid WHERE rolname = 'test_def_bypassrls';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_def_bypassrls | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+CREATE ROLE test_bypassrls WITH BYPASSRLS;
+SELECT * FROM pg_authid WHERE rolname = 'test_bypassrls';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_bypassrls | f | t | f | f | f | f | t | -1 | |
+(1 row)
+
+ALTER ROLE test_bypassrls WITH NOBYPASSRLS;
+SELECT * FROM pg_authid WHERE rolname = 'test_bypassrls';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_bypassrls | f | t | f | f | f | f | f | -1 | |
+(1 row)
+
+ALTER ROLE test_bypassrls WITH BYPASSRLS;
+SELECT * FROM pg_authid WHERE rolname = 'test_bypassrls';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ test_bypassrls | f | t | f | f | f | f | t | -1 | |
+(1 row)
+
+-- remove the one role with LOGIN rights
+DROP ROLE test_role_canlogin;
+-- other roles are not removed, so that pg_dumpall's dump of roles can be
+-- exercised through pg_upgrade
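Every probe in this role-attribute section follows the same pattern: create a role relying on the default, create one with the attribute set explicitly, then toggle it with ALTER ROLE and re-read the corresponding pg_authid column each time. A compact sketch of the pattern (hypothetical role name):

    CREATE ROLE demo_flag WITH CREATEDB;
    SELECT rolcreatedb FROM pg_authid WHERE rolname = 'demo_flag';  -- t
    ALTER ROLE demo_flag WITH NOCREATEDB;
    SELECT rolcreatedb FROM pg_authid WHERE rolname = 'demo_flag';  -- f
    DROP ROLE demo_flag;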
+ CREATE ROLE regress_test_def_superuser;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_superuser';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_def_superuser | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ CREATE ROLE regress_test_superuser WITH SUPERUSER;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_superuser';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_superuser | t | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_superuser WITH NOSUPERUSER;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_superuser';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_superuser | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_superuser WITH SUPERUSER;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_superuser';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_superuser | t | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ -- default for inherit is true
+ CREATE ROLE regress_test_def_inherit;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_inherit';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ --------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_def_inherit | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ CREATE ROLE regress_test_inherit WITH NOINHERIT;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_inherit';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_inherit | f | f | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_inherit WITH INHERIT;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_inherit';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_inherit | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_inherit WITH NOINHERIT;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_inherit';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_inherit | f | f | f | f | f | f | f | -1 | |
+ (1 row)
+
+ -- default for create role is false
+ CREATE ROLE regress_test_def_createrole;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_createrole';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ -----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_def_createrole | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ CREATE ROLE regress_test_createrole WITH CREATEROLE;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createrole';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ -------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_createrole | f | t | t | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_createrole WITH NOCREATEROLE;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createrole';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ -------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_createrole | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_createrole WITH CREATEROLE;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createrole';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ -------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_createrole | f | t | t | f | f | f | f | -1 | |
+ (1 row)
+
+ -- default for create database is false
+ CREATE ROLE regress_test_def_createdb;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_createdb';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ---------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_def_createdb | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ CREATE ROLE regress_test_createdb WITH CREATEDB;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createdb';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ -----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_createdb | f | t | f | t | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_createdb WITH NOCREATEDB;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createdb';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ -----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_createdb | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_createdb WITH CREATEDB;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createdb';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ -----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_createdb | f | t | f | t | f | f | f | -1 | |
+ (1 row)
+
+ -- default for can login is false for role
+ CREATE ROLE regress_test_def_role_canlogin;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_role_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ --------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_def_role_canlogin | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ CREATE ROLE regress_test_role_canlogin WITH LOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_role_canlogin | f | t | f | f | t | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_role_canlogin WITH NOLOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_role_canlogin | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_role_canlogin WITH LOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_role_canlogin | f | t | f | f | t | f | f | -1 | |
+ (1 row)
+
+ -- default for can login is true for user
+ CREATE USER regress_test_def_user_canlogin;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_user_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ --------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_def_user_canlogin | f | t | f | f | t | f | f | -1 | |
+ (1 row)
+
+ CREATE USER regress_test_user_canlogin WITH NOLOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_user_canlogin | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER USER regress_test_user_canlogin WITH LOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_user_canlogin | f | t | f | f | t | f | f | -1 | |
+ (1 row)
+
+ ALTER USER regress_test_user_canlogin WITH NOLOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_user_canlogin | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ -- default for replication is false
+ CREATE ROLE regress_test_def_replication;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_replication';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_def_replication | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ CREATE ROLE regress_test_replication WITH REPLICATION;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_replication';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ --------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_replication | f | t | f | f | f | t | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_replication WITH NOREPLICATION;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_replication';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ --------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_replication | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_replication WITH REPLICATION;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_replication';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ --------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_replication | f | t | f | f | f | t | f | -1 | |
+ (1 row)
+
+ -- default for bypassrls is false
+ CREATE ROLE regress_test_def_bypassrls;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_bypassrls';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_def_bypassrls | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ CREATE ROLE regress_test_bypassrls WITH BYPASSRLS;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_bypassrls | f | t | f | f | f | f | t | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_bypassrls WITH NOBYPASSRLS;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_bypassrls | f | t | f | f | f | f | f | -1 | |
+ (1 row)
+
+ ALTER ROLE regress_test_bypassrls WITH BYPASSRLS;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
+ rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+ regress_test_bypassrls | f | t | f | f | f | f | t | -1 | |
+ (1 row)
+
+ -- clean up roles
+ DROP ROLE regress_test_def_superuser;
+ DROP ROLE regress_test_superuser;
+ DROP ROLE regress_test_def_inherit;
+ DROP ROLE regress_test_inherit;
+ DROP ROLE regress_test_def_createrole;
+ DROP ROLE regress_test_createrole;
+ DROP ROLE regress_test_def_createdb;
+ DROP ROLE regress_test_createdb;
+ DROP ROLE regress_test_def_role_canlogin;
+ DROP ROLE regress_test_role_canlogin;
+ DROP USER regress_test_def_user_canlogin;
+ DROP USER regress_test_user_canlogin;
+ DROP ROLE regress_test_def_replication;
+ DROP ROLE regress_test_replication;
+ DROP ROLE regress_test_def_bypassrls;
+ DROP ROLE regress_test_bypassrls;
-- user's security level must be higher than or equal to document's
CREATE POLICY p1 ON document
USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user));
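The USING expression may contain a sub-SELECT, which is evaluated against the session's current_user for every candidate row, so uaccount serves as a per-role clearance table here. The values below are illustrative, assuming uaccount(pguser, seclv); they are consistent with the outputs that follow but are not quoted from the suite:

    INSERT INTO uaccount VALUES ('regress_rls_bob', 1), ('regress_rls_carol', 2);
    -- a document row is visible only when dlevel <= the viewer's seclv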
- -- viewpoint from rls_regress_user1
- SET SESSION AUTHORIZATION rls_regress_user1;
+ -- viewpoint from regress_rls_bob
+ SET SESSION AUTHORIZATION regress_rls_bob;
SET row_security TO ON;
SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did;
-NOTICE: f_leak => my first novel
-NOTICE: f_leak => my first manga
-NOTICE: f_leak => great science fiction
-NOTICE: f_leak => great manga
did | cid | dlevel | dauthor | dtitle
-----+-----+--------+-------------------+-----------------------
- 1 | 11 | 1 | rls_regress_user1 | my first novel
- 4 | 44 | 1 | rls_regress_user1 | my first manga
- 6 | 22 | 1 | rls_regress_user2 | great science fiction
- 8 | 44 | 1 | rls_regress_user2 | great manga
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 4 | 44 | 1 | regress_rls_bob | my first manga
+ 6 | 22 | 1 | regress_rls_carol | great science fiction
+ 8 | 44 | 1 | regress_rls_carol | great manga
(4 rows)
SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did;
-NOTICE: f_leak => my first novel
-NOTICE: f_leak => my first manga
-NOTICE: f_leak => great science fiction
-NOTICE: f_leak => great manga
cid | did | dlevel | dauthor | dtitle | cname
-----+-----+--------+-------------------+-----------------------+-----------------
- 11 | 1 | 1 | rls_regress_user1 | my first novel | novel
- 44 | 4 | 1 | rls_regress_user1 | my first manga | manga
- 22 | 6 | 1 | rls_regress_user2 | great science fiction | science fiction
- 44 | 8 | 1 | rls_regress_user2 | great manga | manga
+ 11 | 1 | 1 | regress_rls_bob | my first novel | novel
+ 44 | 4 | 1 | regress_rls_bob | my first manga | manga
+ 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction
+ 44 | 8 | 1 | regress_rls_carol | great manga | manga
(4 rows)
-- try a sampled version
SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0)
WHERE f_leak(dtitle) ORDER BY did;
-NOTICE: f_leak => my first manga
-NOTICE: f_leak => great science fiction
-NOTICE: f_leak => great manga
did | cid | dlevel | dauthor | dtitle
-----+-----+--------+-------------------+-----------------------
- 4 | 44 | 1 | regress_rls_bob | my first manga
- 6 | 22 | 1 | regress_rls_carol | great science fiction
- 8 | 44 | 1 | regress_rls_carol | great manga
-(3 rows)
+ 6 | 22 | 1 | rls_regress_user2 | great science fiction
+ 8 | 44 | 1 | rls_regress_user2 | great manga
+(2 rows)
- -- viewpoint from rls_regress_user2
- SET SESSION AUTHORIZATION rls_regress_user2;
+ -- viewpoint from regress_rls_carol
+ SET SESSION AUTHORIZATION regress_rls_carol;
SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did;
-NOTICE: f_leak => my first novel
-NOTICE: f_leak => my second novel
-NOTICE: f_leak => my science fiction
-NOTICE: f_leak => my first manga
-NOTICE: f_leak => my second manga
-NOTICE: f_leak => great science fiction
-NOTICE: f_leak => great technology book
-NOTICE: f_leak => great manga
did | cid | dlevel | dauthor | dtitle
-----+-----+--------+-------------------+-----------------------
- 1 | 11 | 1 | rls_regress_user1 | my first novel
- 2 | 11 | 2 | rls_regress_user1 | my second novel
- 3 | 22 | 2 | rls_regress_user1 | my science fiction
- 4 | 44 | 1 | rls_regress_user1 | my first manga
- 5 | 44 | 2 | rls_regress_user1 | my second manga
- 6 | 22 | 1 | rls_regress_user2 | great science fiction
- 7 | 33 | 2 | rls_regress_user2 | great technology book
- 8 | 44 | 1 | rls_regress_user2 | great manga
+ 1 | 11 | 1 | regress_rls_bob | my first novel
+ 2 | 11 | 2 | regress_rls_bob | my second novel
+ 3 | 22 | 2 | regress_rls_bob | my science fiction
+ 4 | 44 | 1 | regress_rls_bob | my first manga
+ 5 | 44 | 2 | regress_rls_bob | my second manga
+ 6 | 22 | 1 | regress_rls_carol | great science fiction
+ 7 | 33 | 2 | regress_rls_carol | great technology book
+ 8 | 44 | 1 | regress_rls_carol | great manga
(8 rows)
SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did;
-NOTICE: f_leak => my first novel
-NOTICE: f_leak => my second novel
-NOTICE: f_leak => my science fiction
-NOTICE: f_leak => my first manga
-NOTICE: f_leak => my second manga
-NOTICE: f_leak => great science fiction
-NOTICE: f_leak => great technology book
-NOTICE: f_leak => great manga
cid | did | dlevel | dauthor | dtitle | cname
-----+-----+--------+-------------------+-----------------------+-----------------
- 11 | 1 | 1 | rls_regress_user1 | my first novel | novel
- 11 | 2 | 2 | rls_regress_user1 | my second novel | novel
- 22 | 3 | 2 | rls_regress_user1 | my science fiction | science fiction
- 44 | 4 | 1 | rls_regress_user1 | my first manga | manga
- 44 | 5 | 2 | rls_regress_user1 | my second manga | manga
- 22 | 6 | 1 | rls_regress_user2 | great science fiction | science fiction
- 33 | 7 | 2 | rls_regress_user2 | great technology book | technology
- 44 | 8 | 1 | rls_regress_user2 | great manga | manga
+ 11 | 1 | 1 | regress_rls_bob | my first novel | novel
+ 11 | 2 | 2 | regress_rls_bob | my second novel | novel
+ 22 | 3 | 2 | regress_rls_bob | my science fiction | science fiction
+ 44 | 4 | 1 | regress_rls_bob | my first manga | manga
+ 44 | 5 | 2 | regress_rls_bob | my second manga | manga
+ 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction
+ 33 | 7 | 2 | regress_rls_carol | great technology book | technology
+ 44 | 8 | 1 | regress_rls_carol | great manga | manga
(8 rows)
-- try a sampled version
ERROR: must be owner of relation document
DROP POLICY p1 ON document; --fail
ERROR: must be owner of relation document
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
ALTER POLICY p1 ON document USING (dauthor = current_user);
- -- viewpoint from rls_regress_user1 again
- SET SESSION AUTHORIZATION rls_regress_user1;
+ -- viewpoint from regress_rls_bob again
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did;
-NOTICE: f_leak => my first novel
-NOTICE: f_leak => my second novel
-NOTICE: f_leak => my science fiction
-NOTICE: f_leak => my first manga
-NOTICE: f_leak => my second manga
- did | cid | dlevel | dauthor | dtitle
------+-----+--------+-----------------+--------------------
- 1 | 11 | 1 | regress_rls_bob | my first novel
- 2 | 11 | 2 | regress_rls_bob | my second novel
- 3 | 22 | 2 | regress_rls_bob | my science fiction
- 4 | 44 | 1 | regress_rls_bob | my first manga
- 5 | 44 | 2 | regress_rls_bob | my second manga
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+--------------------
+ 1 | 11 | 1 | rls_regress_user1 | my first novel
+ 2 | 11 | 2 | rls_regress_user1 | my second novel
+ 3 | 22 | 2 | rls_regress_user1 | my science fiction
+ 4 | 44 | 1 | rls_regress_user1 | my first manga
+ 5 | 44 | 2 | rls_regress_user1 | my second manga
(5 rows)
SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER by did;
-NOTICE: f_leak => my first novel
-NOTICE: f_leak => my second novel
-NOTICE: f_leak => my science fiction
-NOTICE: f_leak => my first manga
-NOTICE: f_leak => my second manga
- cid | did | dlevel | dauthor | dtitle | cname
------+-----+--------+-----------------+--------------------+-----------------
- 11 | 1 | 1 | regress_rls_bob | my first novel | novel
- 11 | 2 | 2 | regress_rls_bob | my second novel | novel
- 22 | 3 | 2 | regress_rls_bob | my science fiction | science fiction
- 44 | 4 | 1 | regress_rls_bob | my first manga | manga
- 44 | 5 | 2 | regress_rls_bob | my second manga | manga
+ cid | did | dlevel | dauthor | dtitle | cname
+-----+-----+--------+-------------------+--------------------+-----------------
+ 11 | 1 | 1 | rls_regress_user1 | my first novel | novel
+ 11 | 2 | 2 | rls_regress_user1 | my second novel | novel
+ 22 | 3 | 2 | rls_regress_user1 | my science fiction | science fiction
+ 44 | 4 | 1 | rls_regress_user1 | my first manga | manga
+ 44 | 5 | 2 | rls_regress_user1 | my second manga | manga
(5 rows)
- -- viewpoint from rls_regress_user2 again
- SET SESSION AUTHORIZATION rls_regress_user2;
+ -- viewpoint from regress_rls_carol again
+ SET SESSION AUTHORIZATION regress_rls_carol;
SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did;
-NOTICE: f_leak => great science fiction
-NOTICE: f_leak => great technology book
-NOTICE: f_leak => great manga
did | cid | dlevel | dauthor | dtitle
-----+-----+--------+-------------------+-----------------------
- 6 | 22 | 1 | rls_regress_user2 | great science fiction
- 7 | 33 | 2 | rls_regress_user2 | great technology book
- 8 | 44 | 1 | rls_regress_user2 | great manga
+ 6 | 22 | 1 | regress_rls_carol | great science fiction
+ 7 | 33 | 2 | regress_rls_carol | great technology book
+ 8 | 44 | 1 | regress_rls_carol | great manga
(3 rows)
SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER by did;
-NOTICE: f_leak => great science fiction
-NOTICE: f_leak => great technology book
-NOTICE: f_leak => great manga
cid | did | dlevel | dauthor | dtitle | cname
-----+-----+--------+-------------------+-----------------------+-----------------
- 22 | 6 | 1 | rls_regress_user2 | great science fiction | science fiction
- 33 | 7 | 2 | rls_regress_user2 | great technology book | technology
- 44 | 8 | 1 | rls_regress_user2 | great manga | manga
+ 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction
+ 33 | 7 | 2 | regress_rls_carol | great technology book | technology
+ 44 | 8 | 1 | regress_rls_carol | great manga | manga
(3 rows)
EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle);
Filter: f_leak(document.dtitle)
-> Seq Scan on document document_1
Filter: (dauthor = "current_user"())
- -> Index Scan using category_pkey on category
- Index Cond: (cid = document.cid)
-(7 rows)
+(5 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle);
+ QUERY PLAN
+----------------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Nested Loop
+ -> Subquery Scan on document
+ Filter: f_leak(document.dtitle)
+ -> Seq Scan on document document_1
+ Filter: (dauthor = "current_user"())
+ -> Index Scan using category_pkey on category
+ Index Cond: (cid = document.cid)
+(8 rows)
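The Subquery Scan in this plan is the security barrier doing its job: the policy qual (dauthor = "current_user"()) sits inside it, and the user-supplied f_leak() only above it, so the deliberately leaky function never sees rows the policy filtered out. f_leak is the suite's usual probe, defined earlier in rls.sql roughly as:

    CREATE OR REPLACE FUNCTION f_leak(text) RETURNS bool
      COST 0.0000001 LANGUAGE plpgsql
      AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END';

The absurdly low cost invites the planner to evaluate it as early as possible, which is exactly what row security must prevent.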
-- interaction of FK/PK constraints
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
CREATE POLICY p2 ON category
- USING (CASE WHEN current_user = 'rls_regress_user1' THEN cid IN (11, 33)
- WHEN current_user = 'rls_regress_user2' THEN cid IN (22, 44)
+ USING (CASE WHEN current_user = 'regress_rls_bob' THEN cid IN (11, 33)
+ WHEN current_user = 'regress_rls_carol' THEN cid IN (22, 44)
ELSE false END);
ALTER TABLE category ENABLE ROW LEVEL SECURITY;
-- cannot delete PK referenced by invisible FK
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid;
- did | cid | dlevel | dauthor | dtitle | cid | cname
------+-----+--------+-----------------+--------------------+-----+------------
- 2 | 11 | 2 | regress_rls_bob | my second novel | 11 | novel
- 1 | 11 | 1 | regress_rls_bob | my first novel | 11 | novel
- | | | | | 33 | technology
- 5 | 44 | 2 | regress_rls_bob | my second manga | |
- 4 | 44 | 1 | regress_rls_bob | my first manga | |
- 3 | 22 | 2 | regress_rls_bob | my science fiction | |
+ did | cid | dlevel | dauthor | dtitle | cid | cname
+-----+-----+--------+-------------------+--------------------+-----+------------
+ 4 | 44 | 1 | rls_regress_user1 | my first manga | |
+ 5 | 44 | 2 | rls_regress_user1 | my second manga | |
+ 2 | 11 | 2 | rls_regress_user1 | my second novel | 11 | novel
+ 1 | 11 | 1 | rls_regress_user1 | my first novel | 11 | novel
+ | | | | | 33 | technology
+ 3 | 22 | 2 | rls_regress_user1 | my science fiction | |
(6 rows)
DELETE FROM category WHERE cid = 33; -- fails with FK violation
-> LockRows
-> Result
-> Append
- -> Seq Scan on t1 t1_1
- Filter: ((a % 2) = 0)
- -> Seq Scan on t2
- Filter: ((a % 2) = 0)
- -> Seq Scan on t3
- Filter: ((a % 2) = 0)
-(12 rows)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t1 t1_1
+ Filter: ((a % 2) = 0)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t2
+ Filter: ((a % 2) = 0)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t3
+ Filter: ((a % 2) = 0)
+(15 rows)
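-- Note the Postgres-XL shape of the plan above: each child of the inheritance
-- Append gets its own Remote Subquery Scan, so the (a % 2) = 0 policy qual is
-- evaluated on the datanodes that actually hold each child table.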
+
+-- union all query
+SELECT a, b, oid FROM t2 UNION ALL SELECT a, b, oid FROM t3;
+ a | b | oid
+---+-----+-----
+ 1 | abc | 201
+ 3 | cde | 203
+ 1 | xxx | 301
+ 2 | yyy | 302
+ 3 | zzz | 303
+(5 rows)
+
+EXPLAIN (COSTS OFF) SELECT a, b, oid FROM t2 UNION ALL SELECT a, b, oid FROM t3;
+ QUERY PLAN
+-----------------------------------------------------------
+ Append
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t2
+ Filter: ((a % 2) = 1)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t3
+(6 rows)
+ -- union all query
+ SELECT a, b, oid FROM t2 UNION ALL SELECT a, b, oid FROM t3;
+ a | b | oid
+ ---+-----+-----
+ 1 | abc | 201
+ 3 | cde | 203
+ 1 | xxx | 301
+ 2 | yyy | 302
+ 3 | zzz | 303
+ (5 rows)
+
+ EXPLAIN (COSTS OFF) SELECT a, b, oid FROM t2 UNION ALL SELECT a, b, oid FROM t3;
+ QUERY PLAN
+ -------------------------------
+ Append
+ -> Seq Scan on t2
+ Filter: ((a % 2) = 1)
+ -> Seq Scan on t3
+ (4 rows)
+
-- superuser is allowed to bypass RLS checks
RESET SESSION AUTHORIZATION;
SET row_security TO OFF;
SELECT * FROM t1 WHERE f_leak(b);
-NOTICE: f_leak => aba
-NOTICE: f_leak => bbb
-NOTICE: f_leak => ccc
-NOTICE: f_leak => dad
-NOTICE: f_leak => abc
-NOTICE: f_leak => bcd
-NOTICE: f_leak => cde
-NOTICE: f_leak => def
-NOTICE: f_leak => xxx
-NOTICE: f_leak => yyy
-NOTICE: f_leak => zzz
a | b
---+-----
- 1 | aaa
+ 1 | aba
2 | bbb
3 | ccc
- 4 | ddd
+ 4 | dad
1 | abc
2 | bcd
3 | cde
(11 rows)
EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b);
- QUERY PLAN
----------------------------
+ QUERY PLAN
+-----------------------------------------------------------
Append
- -> Seq Scan on t1
- Filter: f_leak(b)
- -> Seq Scan on t2
- Filter: f_leak(b)
- -> Seq Scan on t3
- Filter: f_leak(b)
-(7 rows)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t1
+ Filter: f_leak(b)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t2
+ Filter: f_leak(b)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t3
+ Filter: f_leak(b)
+(10 rows)
-- non-superuser with bypass privilege can bypass RLS policy when disabled
- SET SESSION AUTHORIZATION rls_regress_exempt_user;
+ SET SESSION AUTHORIZATION regress_rls_exempt_user;
SET row_security TO OFF;
SELECT * FROM t1 WHERE f_leak(b);
-NOTICE: f_leak => aba
-NOTICE: f_leak => bbb
-NOTICE: f_leak => ccc
-NOTICE: f_leak => dad
-NOTICE: f_leak => abc
-NOTICE: f_leak => bcd
-NOTICE: f_leak => cde
-NOTICE: f_leak => def
-NOTICE: f_leak => xxx
-NOTICE: f_leak => yyy
-NOTICE: f_leak => zzz
a | b
---+-----
- 1 | aaa
+ 1 | aba
2 | bbb
3 | ccc
- 4 | ddd
+ 4 | dad
1 | abc
2 | bcd
3 | cde
(11 rows)
EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b);
- QUERY PLAN
----------------------------
+ QUERY PLAN
+-----------------------------------------------------------
Append
- -> Seq Scan on t1
- Filter: f_leak(b)
- -> Seq Scan on t2
- Filter: f_leak(b)
- -> Seq Scan on t3
- Filter: f_leak(b)
-(7 rows)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t1
+ Filter: f_leak(b)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t2
+ Filter: f_leak(b)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t3
+ Filter: f_leak(b)
+(10 rows)
----- Dependencies -----
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
SET row_security TO ON;
CREATE TABLE dependee (x integer, y integer);
CREATE TABLE dependent (x integer, y integer);
--
-- Mutual recursion via views
--
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
CREATE VIEW rec1v AS SELECT * FROM rec1;
CREATE VIEW rec2v AS SELECT * FROM rec2;
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
ALTER POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y));
+ERROR: relation "rec2v" does not exist
ALTER POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1v WHERE y = b));
-SET SESSION AUTHORIZATION regress_rls_bob;
+ERROR: relation "rec1v" does not exist
+SET SESSION AUTHORIZATION rls_regress_user1;
SELECT * FROM rec1; -- fail, mutual recursion via views
ERROR: infinite recursion detected in policy for relation "rec1"
--
RESET client_min_messages;
CREATE VIEW rec1v WITH (security_barrier) AS SELECT * FROM rec1;
CREATE VIEW rec2v WITH (security_barrier) AS SELECT * FROM rec2;
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
CREATE POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y));
+ERROR: policy "r1" for relation "rec1" already exists
CREATE POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1v WHERE y = b));
-SET SESSION AUTHORIZATION regress_rls_bob;
+ERROR: policy "r2" for relation "rec2" already exists
+SET SESSION AUTHORIZATION rls_regress_user1;
SELECT * FROM rec1; -- fail, mutual recursion via s.b. views
ERROR: infinite recursion detected in policy for relation "rec1"
--
ERROR: infinite recursion detected in policy for relation "s1"
INSERT INTO s1 VALUES (1, 'foo'); -- fail (infinite recursion)
ERROR: infinite recursion detected in policy for relation "s1"
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
DROP POLICY p3 on s1;
ALTER POLICY p2 ON s2 USING (x % 2 = 0);
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM s1 WHERE f_leak(b); -- OK
-NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
a | b
---+----------------------------------
2 | c81e728d9d4c2f636f067f89cc14862c
(2 rows)
EXPLAIN (COSTS OFF) SELECT * FROM only s1 WHERE f_leak(b);
- QUERY PLAN
-----------------------------------------------------------
- Subquery Scan on s1
- Filter: f_leak(s1.b)
- -> Hash Join
- Hash Cond: (s1_1.a = s2.x)
- -> Seq Scan on s1 s1_1
- -> Hash
- -> HashAggregate
- Group Key: s2.x
- -> Subquery Scan on s2
- Filter: (s2.y ~~ '%2f%'::text)
- -> Seq Scan on s2 s2_1
- Filter: ((x % 2) = 0)
-(12 rows)
+ QUERY PLAN
+----------------------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Subquery Scan on s1
+ Filter: f_leak(s1.b)
+ -> Hash Join
+ Hash Cond: (s1_1.a = s2.x)
+ -> Seq Scan on s1 s1_1
+ -> Hash
+ -> HashAggregate
+ Group Key: s2.x
+ -> Subquery Scan on s2
+ Filter: (s2.y ~~ '%2f%'::text)
+ -> Seq Scan on s2 s2_1
+ Filter: ((x % 2) = 0)
+(13 rows)
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
ALTER POLICY p1 ON s1 USING (a in (select x from v2)); -- using VIEW in RLS policy
-SET SESSION AUTHORIZATION regress_rls_bob;
+ERROR: relation "v2" does not exist
+SET SESSION AUTHORIZATION rls_regress_user1;
SELECT * FROM s1 WHERE f_leak(b); -- OK
-NOTICE: f_leak => 0267aaf632e87a63288a08331f22c7c3
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
- a | b
-----+----------------------------------
- -4 | 0267aaf632e87a63288a08331f22c7c3
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
+ a | b
+---+----------------------------------
+ 2 | c81e728d9d4c2f636f067f89cc14862c
+ 4 | a87ff679a2f3e71d9181a67b7542122c
(2 rows)
EXPLAIN (COSTS OFF) SELECT * FROM s1 WHERE f_leak(b);
(3 rows)
EXPLAIN (COSTS OFF) SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%';
- QUERY PLAN
---------------------------------------------------------------------
- Subquery Scan on s2
- Filter: (s2.y ~~ '%28%'::text)
- -> Seq Scan on s2 s2_1
- Filter: ((x % 2) = 0)
- SubPlan 1
- -> Limit
- -> Subquery Scan on s1
- -> Nested Loop Semi Join
- Join Filter: (s1_1.a = s2_2.x)
- -> Seq Scan on s1 s1_1
- -> Materialize
- -> Subquery Scan on s2_2
- Filter: (s2_2.y ~~ '%af%'::text)
- -> Seq Scan on s2 s2_3
- Filter: ((x % 2) = 0)
-(15 rows)
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Subquery Scan on s2
+ Filter: (s2.y ~~ '%28%'::text)
+ -> Seq Scan on s2 s2_1
+ Filter: ((x % 2) = 0)
+ SubPlan 1
+ -> Limit
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Limit
+ -> Subquery Scan on s1
+ -> Nested Loop Semi Join
+ Join Filter: (s1_1.a = s2_2.x)
+ -> Seq Scan on s1 s1_1
+ -> Materialize
+ -> Subquery Scan on s2_2
+ Filter: (s2_2.y ~~ '%2f%'::text)
+ -> Seq Scan on s2 s2_3
+ Filter: ((x % 2) = 0)
+(18 rows)
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
ALTER POLICY p2 ON s2 USING (x in (select a from s1 where b like '%d2%'));
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM s1 WHERE f_leak(b); -- fail (infinite recursion via view)
ERROR: infinite recursion detected in policy for relation "s1"
- -- prepared statement with rls_regress_user0 privilege
+ -- prepared statement with regress_rls_alice privilege
PREPARE p1(int) AS SELECT * FROM t1 WHERE a <= $1;
EXECUTE p1(2);
a | b
RESET SESSION AUTHORIZATION;
SET row_security TO OFF;
SELECT * FROM t1 WHERE f_leak(b);
-NOTICE: f_leak => aba
-NOTICE: f_leak => bbb
-NOTICE: f_leak => ccc
-NOTICE: f_leak => dad
-NOTICE: f_leak => abc
-NOTICE: f_leak => bcd
-NOTICE: f_leak => cde
-NOTICE: f_leak => def
-NOTICE: f_leak => xxx
-NOTICE: f_leak => yyy
-NOTICE: f_leak => zzz
a | b
---+-----
- 1 | aaa
+ 1 | aba
2 | bbb
3 | ccc
- 4 | ddd
+ 4 | dad
1 | abc
2 | bcd
3 | cde
(3 rows)
EXPLAIN (COSTS OFF) EXECUTE p2(2);
- QUERY PLAN
--------------------------
+ QUERY PLAN
+-----------------------------------------------------------
Append
- -> Seq Scan on t1
- Filter: (a = 2)
- -> Seq Scan on t2
- Filter: (a = 2)
- -> Seq Scan on t3
- Filter: (a = 2)
-(7 rows)
+ -> Remote Subquery Scan on all (datanode_1)
+ -> Seq Scan on t1
+ Filter: (a = 2)
+ -> Remote Subquery Scan on all (datanode_1)
+ -> Seq Scan on t2
+ Filter: (a = 2)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on t3
+ Filter: (a = 2)
+(10 rows)
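-- The per-child node lists differ above presumably because EXECUTE p2(2)
-- allows pruning by distribution key: a = 2 maps to a single datanode for t1
-- and t2, while t3's distribution still requires scanning both datanodes.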
-- also, case when privilege switch from superuser
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SET row_security TO ON;
EXECUTE p2(2);
a | b
--
-- UPDATE / DELETE and Row-level security
--
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
EXPLAIN (COSTS OFF) UPDATE t1 SET b = b || b WHERE f_leak(b);
- QUERY PLAN
--------------------------------------------
- Update on t1 t1_3
- Update on t1 t1_3
- Update on t2 t1
- Update on t3 t1
- -> Subquery Scan on t1
- Filter: f_leak(t1.b)
- -> LockRows
- -> Seq Scan on t1 t1_4
- Filter: ((a % 2) = 0)
- -> Subquery Scan on t1_1
- Filter: f_leak(t1_1.b)
- -> LockRows
- -> Seq Scan on t2
- Filter: ((a % 2) = 0)
- -> Subquery Scan on t1_2
- Filter: f_leak(t1_2.b)
- -> LockRows
- -> Seq Scan on t3
- Filter: ((a % 2) = 0)
-(19 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Update on t1 t1_3
+ Update on t1 t1_3
+ Update on t2 t1
+ Update on t3 t1
+ -> Subquery Scan on t1
+ Filter: f_leak(t1.b)
+ -> LockRows
+ -> Seq Scan on t1 t1_4
+ Filter: ((a % 2) = 0)
+ -> Subquery Scan on t1_1
+ Filter: f_leak(t1_1.b)
+ -> LockRows
+ -> Seq Scan on t2
+ Filter: ((a % 2) = 0)
+ -> Subquery Scan on t1_2
+ Filter: f_leak(t1_2.b)
+ -> LockRows
+ -> Seq Scan on t3
+ Filter: ((a % 2) = 0)
+(20 rows)
UPDATE t1 SET b = b || b WHERE f_leak(b);
-NOTICE: f_leak => bbb
-NOTICE: f_leak => dad
-NOTICE: f_leak => bcd
-NOTICE: f_leak => def
-NOTICE: f_leak => yyy
EXPLAIN (COSTS OFF) UPDATE only t1 SET b = b || '_updt' WHERE f_leak(b);
- QUERY PLAN
--------------------------------------------
- Update on t1 t1_1
- -> Subquery Scan on t1
- Filter: f_leak(t1.b)
- -> LockRows
- -> Seq Scan on t1 t1_2
- Filter: ((a % 2) = 0)
-(6 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Update on t1 t1_1
+ -> Subquery Scan on t1
+ Filter: f_leak(t1.b)
+ -> LockRows
+ -> Seq Scan on t1 t1_2
+ Filter: ((a % 2) = 0)
+(7 rows)
UPDATE only t1 SET b = b || '_updt' WHERE f_leak(b);
-NOTICE: f_leak => bbbbbb
-NOTICE: f_leak => daddad
-- returning clause with system column
UPDATE only t1 SET b = b WHERE f_leak(b) RETURNING oid, *, t1;
-NOTICE: f_leak => bbbbbb_updt
-NOTICE: f_leak => daddad_updt
oid | a | b | t1
-----+---+-------------+-----------------
102 | 2 | bbbbbb_updt | (2,bbbbbb_updt)
4 | defdef
(11 rows)
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SET row_security TO ON;
EXPLAIN (COSTS OFF) DELETE FROM only t1 WHERE f_leak(b);
- QUERY PLAN
--------------------------------------------
- Delete on t1 t1_1
- -> Subquery Scan on t1
- Filter: f_leak(t1.b)
- -> LockRows
- -> Seq Scan on t1 t1_2
- Filter: ((a % 2) = 0)
-(6 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Delete on t1 t1_1
+ -> Subquery Scan on t1
+ Filter: f_leak(t1.b)
+ -> LockRows
+ -> Seq Scan on t1 t1_2
+ Filter: ((a % 2) = 0)
+(7 rows)
EXPLAIN (COSTS OFF) DELETE FROM t1 WHERE f_leak(b);
- QUERY PLAN
--------------------------------------------
- Delete on t1 t1_3
- Delete on t1 t1_3
- Delete on t2 t1
- Delete on t3 t1
- -> Subquery Scan on t1
- Filter: f_leak(t1.b)
- -> LockRows
- -> Seq Scan on t1 t1_4
- Filter: ((a % 2) = 0)
- -> Subquery Scan on t1_1
- Filter: f_leak(t1_1.b)
- -> LockRows
- -> Seq Scan on t2
- Filter: ((a % 2) = 0)
- -> Subquery Scan on t1_2
- Filter: f_leak(t1_2.b)
- -> LockRows
- -> Seq Scan on t3
- Filter: ((a % 2) = 0)
-(19 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Delete on t1 t1_3
+ Delete on t1 t1_3
+ Delete on t2 t1
+ Delete on t3 t1
+ -> Subquery Scan on t1
+ Filter: f_leak(t1.b)
+ -> LockRows
+ -> Seq Scan on t1 t1_4
+ Filter: ((a % 2) = 0)
+ -> Subquery Scan on t1_1
+ Filter: f_leak(t1_1.b)
+ -> LockRows
+ -> Seq Scan on t2
+ Filter: ((a % 2) = 0)
+ -> Subquery Scan on t1_2
+ Filter: f_leak(t1_2.b)
+ -> LockRows
+ -> Seq Scan on t3
+ Filter: ((a % 2) = 0)
+(20 rows)
DELETE FROM only t1 WHERE f_leak(b) RETURNING oid, *, t1;
-NOTICE: f_leak => bbbbbb_updt
-NOTICE: f_leak => daddad_updt
oid | a | b | t1
-----+---+-------------+-----------------
102 | 2 | bbbbbb_updt | (2,bbbbbb_updt)
INSERT INTO b1 (SELECT x, md5(x::text) FROM generate_series(-10,10) x);
CREATE POLICY p1 ON b1 USING (a % 2 = 0);
ALTER TABLE b1 ENABLE ROW LEVEL SECURITY;
- GRANT ALL ON b1 TO rls_regress_user1;
- SET SESSION AUTHORIZATION rls_regress_user1;
+ GRANT ALL ON b1 TO regress_rls_bob;
+ SET SESSION AUTHORIZATION regress_rls_bob;
CREATE VIEW bv1 WITH (security_barrier) AS SELECT * FROM b1 WHERE a > 0 WITH CHECK OPTION;
- GRANT ALL ON bv1 TO rls_regress_user2;
- SET SESSION AUTHORIZATION rls_regress_user2;
+ GRANT ALL ON bv1 TO regress_rls_carol;
+ SET SESSION AUTHORIZATION regress_rls_carol;
EXPLAIN (COSTS OFF) SELECT * FROM bv1 WHERE f_leak(b);
- QUERY PLAN
----------------------------------------------
- Subquery Scan on bv1
- Filter: f_leak(bv1.b)
- -> Seq Scan on b1
- Filter: ((a > 0) AND ((a % 2) = 0))
-(4 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Subquery Scan on bv1
+ Filter: f_leak(bv1.b)
+ -> Seq Scan on b1
+ Filter: ((a > 0) AND ((a % 2) = 0))
+(5 rows)
SELECT * FROM bv1 WHERE f_leak(b);
-NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
-NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d
-NOTICE: f_leak => d3d9446802a44259755d38e6d163e820
a | b
----+----------------------------------
2 | c81e728d9d4c2f636f067f89cc14862c
(5 rows)
INSERT INTO bv1 VALUES (-1, 'xxx'); -- should fail view WCO
- ERROR: new row violates row level security policy for "b1"
+ ERROR: new row violates row-level security policy for table "b1"
INSERT INTO bv1 VALUES (11, 'xxx'); -- should fail RLS check
- ERROR: new row violates row level security policy for "b1"
+ ERROR: new row violates row-level security policy for table "b1"
INSERT INTO bv1 VALUES (12, 'xxx'); -- ok
EXPLAIN (COSTS OFF) UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b);
- QUERY PLAN
----------------------------------------------------------------------------
- Update on b1 b1_1
- -> Subquery Scan on b1
- Filter: f_leak(b1.b)
- -> Subquery Scan on b1_2
- -> LockRows
- -> Seq Scan on b1 b1_3
- Filter: ((a > 0) AND (a = 4) AND ((a % 2) = 0))
-(7 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------
+ Remote Subquery Scan on all (datanode_2)
+ -> Update on b1 b1_1
+ -> Subquery Scan on b1
+ Filter: f_leak(b1.b)
+ -> Subquery Scan on b1_2
+ -> LockRows
+ -> Seq Scan on b1 b1_3
+ Filter: ((a > 0) AND (a = 4) AND ((a % 2) = 0))
+(8 rows)
UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b);
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
EXPLAIN (COSTS OFF) DELETE FROM bv1 WHERE a = 6 AND f_leak(b);
- QUERY PLAN
----------------------------------------------------------------------------
- Delete on b1 b1_1
- -> Subquery Scan on b1
- Filter: f_leak(b1.b)
- -> Subquery Scan on b1_2
- -> LockRows
- -> Seq Scan on b1 b1_3
- Filter: ((a > 0) AND (a = 6) AND ((a % 2) = 0))
-(7 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------
+ Remote Subquery Scan on all (datanode_1)
+ -> Delete on b1 b1_1
+ -> Subquery Scan on b1
+ Filter: f_leak(b1.b)
+ -> Subquery Scan on b1_2
+ -> LockRows
+ -> Seq Scan on b1 b1_3
+ Filter: ((a > 0) AND (a = 6) AND ((a % 2) = 0))
+(8 rows)
DELETE FROM bv1 WHERE a = 6 AND f_leak(b);
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
-SET SESSION AUTHORIZATION regress_rls_alice;
+SET SESSION AUTHORIZATION rls_regress_user0;
SELECT * FROM b1;
a | b
-----+----------------------------------
--
-- ROLE/GROUP
--
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
CREATE TABLE z1 (a int, b text);
- GRANT SELECT ON z1 TO rls_regress_group1, rls_regress_group2,
- rls_regress_user1, rls_regress_user2;
+ CREATE TABLE z2 (a int, b text);
+ GRANT SELECT ON z1,z2 TO regress_rls_group1, regress_rls_group2,
+ regress_rls_bob, regress_rls_carol;
INSERT INTO z1 VALUES
- (1, 'aaa'),
+ (1, 'aba'),
(2, 'bbb'),
(3, 'ccc'),
- (4, 'ddd');
- CREATE POLICY p1 ON z1 TO rls_regress_group1 USING (a % 2 = 0);
- CREATE POLICY p2 ON z1 TO rls_regress_group2 USING (a % 2 = 1);
+ (4, 'dad');
+ CREATE POLICY p1 ON z1 TO regress_rls_group1 USING (a % 2 = 0);
+ CREATE POLICY p2 ON z1 TO regress_rls_group2 USING (a % 2 = 1);
ALTER TABLE z1 ENABLE ROW LEVEL SECURITY;
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM z1 WHERE f_leak(b);
-NOTICE: f_leak => bbb
-NOTICE: f_leak => dad
a | b
---+-----
2 | bbb
(2 rows)
EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b);
- QUERY PLAN
--------------------------------
- Subquery Scan on z1
- Filter: f_leak(z1.b)
- -> Seq Scan on z1 z1_1
- Filter: ((a % 2) = 0)
-(4 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+(5 rows)
- SET ROLE rls_regress_group1;
+ PREPARE plancache_test AS SELECT * FROM z1 WHERE f_leak(b);
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test;
+ QUERY PLAN
+ -------------------------------
+ Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+ (4 rows)
+
+ PREPARE plancache_test2 AS WITH q AS (SELECT * FROM z1 WHERE f_leak(b)) SELECT * FROM q,z2;
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test2;
+ QUERY PLAN
+ ---------------------------------------
+ Nested Loop
+ CTE q
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+ -> CTE Scan on q
+ -> Materialize
+ -> Seq Scan on z2
+ (9 rows)
+
+ PREPARE plancache_test3 AS WITH q AS (SELECT * FROM z2) SELECT * FROM q,z1 WHERE f_leak(z1.b);
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test3;
+ QUERY PLAN
+ -------------------------------------------
+ Nested Loop
+ CTE q
+ -> Seq Scan on z2
+ -> CTE Scan on q
+ -> Materialize
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+ (9 rows)
+
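-- The EXPLAIN EXECUTEs that follow re-examine these cached plans after each
-- role change; with row security in force a cached plan is tied to the role
-- it was planned for, so the behavior being exercised is, in minimal form
-- (plancache_demo is a hypothetical name):
-- PREPARE plancache_demo AS SELECT count(*) FROM z1;
-- SET ROLE regress_rls_group1;
-- EXECUTE plancache_demo;  -- planned with policy qual (a % 2) = 0
-- SET ROLE regress_rls_group2;
-- EXECUTE plancache_demo;  -- transparently replanned with (a % 2) = 1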
+ SET ROLE regress_rls_group1;
SELECT * FROM z1 WHERE f_leak(b);
-NOTICE: f_leak => bbb
-NOTICE: f_leak => dad
a | b
---+-----
2 | bbb
(2 rows)
EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b);
- QUERY PLAN
--------------------------------
- Subquery Scan on z1
- Filter: f_leak(z1.b)
- -> Seq Scan on z1 z1_1
- Filter: ((a % 2) = 0)
-(4 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+(5 rows)
- SET SESSION AUTHORIZATION rls_regress_user2;
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test;
+ QUERY PLAN
+ -------------------------------
+ Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+ (4 rows)
+
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test2;
+ QUERY PLAN
+ ---------------------------------------
+ Nested Loop
+ CTE q
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+ -> CTE Scan on q
+ -> Materialize
+ -> Seq Scan on z2
+ (9 rows)
+
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test3;
+ QUERY PLAN
+ -------------------------------------------
+ Nested Loop
+ CTE q
+ -> Seq Scan on z2
+ -> CTE Scan on q
+ -> Materialize
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+ (9 rows)
+
+ SET SESSION AUTHORIZATION regress_rls_carol;
SELECT * FROM z1 WHERE f_leak(b);
-NOTICE: f_leak => aba
-NOTICE: f_leak => ccc
a | b
---+-----
- 1 | aaa
+ 1 | aba
3 | ccc
(2 rows)
EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b);
- QUERY PLAN
--------------------------------
- Subquery Scan on z1
- Filter: f_leak(z1.b)
- -> Seq Scan on z1 z1_1
- Filter: ((a % 2) = 1)
-(4 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 1)
+(5 rows)
- SET ROLE rls_regress_group2;
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test;
+ QUERY PLAN
+ -------------------------------
+ Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 1)
+ (4 rows)
+
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test2;
+ QUERY PLAN
+ ---------------------------------------
+ Nested Loop
+ CTE q
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 1)
+ -> CTE Scan on q
+ -> Materialize
+ -> Seq Scan on z2
+ (9 rows)
+
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test3;
+ QUERY PLAN
+ -------------------------------------------
+ Nested Loop
+ CTE q
+ -> Seq Scan on z2
+ -> CTE Scan on q
+ -> Materialize
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 1)
+ (9 rows)
+
+ SET ROLE regress_rls_group2;
SELECT * FROM z1 WHERE f_leak(b);
-NOTICE: f_leak => aba
-NOTICE: f_leak => ccc
a | b
---+-----
- 1 | aaa
+ 1 | aba
3 | ccc
(2 rows)
EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b);
- QUERY PLAN
--------------------------------
- Subquery Scan on z1
- Filter: f_leak(z1.b)
- -> Seq Scan on z1 z1_1
- Filter: ((a % 2) = 1)
-(4 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 1)
+(5 rows)
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test;
+ QUERY PLAN
+ -------------------------------
+ Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 1)
+ (4 rows)
+
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test2;
+ QUERY PLAN
+ ---------------------------------------
+ Nested Loop
+ CTE q
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 1)
+ -> CTE Scan on q
+ -> Materialize
+ -> Seq Scan on z2
+ (9 rows)
+
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test3;
+ QUERY PLAN
+ -------------------------------------------
+ Nested Loop
+ CTE q
+ -> Seq Scan on z2
+ -> CTE Scan on q
+ -> Materialize
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 1)
+ (9 rows)
+
--
-- Views should follow policy for view owner.
--
-- View and Table owner are the same.
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b);
- GRANT SELECT ON rls_view TO rls_regress_user1;
+ GRANT SELECT ON rls_view TO regress_rls_bob;
-- Query as role that is not owner of view or table. Should return all records.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM rls_view;
-NOTICE: f_leak => aba
-NOTICE: f_leak => bbb
-NOTICE: f_leak => ccc
-NOTICE: f_leak => dad
a | b
---+-----
- 1 | aaa
+ 1 | aba
2 | bbb
3 | ccc
- 4 | ddd
+ 4 | dad
(4 rows)
EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
- QUERY PLAN
----------------------
- Seq Scan on z1
- Filter: f_leak(b)
-(2 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on z1
+ Filter: f_leak(b)
+(3 rows)
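-- No policy qual appears in this plan: the view runs with the privileges of
-- its owner, and regress_rls_alice also owns z1, so her own policies do not
-- apply to her (they would only if ALTER TABLE z1 FORCE ROW LEVEL SECURITY
-- had been issued, which this script does not do).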
-- Query as view/table owner. Should return all records.
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
SELECT * FROM rls_view;
-NOTICE: f_leak => aba
-NOTICE: f_leak => bbb
-NOTICE: f_leak => ccc
-NOTICE: f_leak => dad
a | b
---+-----
- 1 | aaa
+ 1 | aba
2 | bbb
3 | ccc
- 4 | ddd
+ 4 | dad
(4 rows)
EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
DROP VIEW rls_view;
-- View and Table owners are different.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b);
- GRANT SELECT ON rls_view TO rls_regress_user0;
+ GRANT SELECT ON rls_view TO regress_rls_alice;
-- Query as role that is not owner of view but is owner of table.
-- Should return records based on view owner policies.
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
SELECT * FROM rls_view;
-NOTICE: f_leak => bbb
-NOTICE: f_leak => dad
a | b
---+-----
2 | bbb
-- Query as role that is not owner of table but is owner of view.
-- Should return records based on view owner policies.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM rls_view;
-NOTICE: f_leak => bbb
-NOTICE: f_leak => dad
a | b
---+-----
2 | bbb
(2 rows)
EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
- QUERY PLAN
--------------------------------
- Subquery Scan on z1
- Filter: f_leak(z1.b)
- -> Seq Scan on z1 z1_1
- Filter: ((a % 2) = 0)
-(4 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+(5 rows)
-- Query as role that is not the owner of the table or view without permissions.
- SET SESSION AUTHORIZATION rls_regress_user2;
+ SET SESSION AUTHORIZATION regress_rls_carol;
SELECT * FROM rls_view; --fail - permission denied.
ERROR: permission denied for relation rls_view
EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied.
ERROR: permission denied for relation rls_view
-- Query as role that is not the owner of the table or view with permissions.
- SET SESSION AUTHORIZATION rls_regress_user1;
- GRANT SELECT ON rls_view TO rls_regress_user2;
+ SET SESSION AUTHORIZATION regress_rls_bob;
+ GRANT SELECT ON rls_view TO regress_rls_carol;
SELECT * FROM rls_view;
-NOTICE: f_leak => bbb
-NOTICE: f_leak => dad
a | b
---+-----
2 | bbb
(2 rows)
EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
- QUERY PLAN
--------------------------------
- Subquery Scan on z1
- Filter: f_leak(z1.b)
- -> Seq Scan on z1 z1_1
- Filter: ((a % 2) = 0)
-(4 rows)
+ QUERY PLAN
+-----------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Subquery Scan on z1
+ Filter: f_leak(z1.b)
+ -> Seq Scan on z1 z1_1
+ Filter: ((a % 2) = 0)
+(5 rows)
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
DROP VIEW rls_view;
--
-- Command specific
CREATE POLICY p3 ON x1 FOR UPDATE USING (a % 2 = 0);
CREATE POLICY p4 ON x1 FOR DELETE USING (a < 8);
ALTER TABLE x1 ENABLE ROW LEVEL SECURITY;
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC;
-NOTICE: f_leak => abc
-NOTICE: f_leak => bcd
-NOTICE: f_leak => def
-NOTICE: f_leak => efg
-NOTICE: f_leak => fgh
-NOTICE: f_leak => fgh
a | b | c
---+-----+-------------------
- 1 | abc | rls_regress_user1
- 2 | bcd | rls_regress_user1
- 4 | def | rls_regress_user2
- 5 | efg | rls_regress_user1
- 6 | fgh | rls_regress_user1
- 8 | fgh | rls_regress_user2
+ 1 | abc | regress_rls_bob
+ 2 | bcd | regress_rls_bob
+ 4 | def | regress_rls_carol
+ 5 | efg | regress_rls_bob
+ 6 | fgh | regress_rls_bob
+ 8 | fgh | regress_rls_carol
(6 rows)
UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *;
-NOTICE: f_leak => abc
-NOTICE: f_leak => bcd
-NOTICE: f_leak => def
-NOTICE: f_leak => efg
-NOTICE: f_leak => fgh
-NOTICE: f_leak => fgh
- a | b | c
----+----------+-------------------
- 1 | abc_updt | regress_rls_bob
- 2 | bcd_updt | regress_rls_bob
- 4 | def_updt | regress_rls_carol
- 5 | efg_updt | regress_rls_bob
- 6 | fgh_updt | regress_rls_bob
- 8 | fgh_updt | regress_rls_carol
+ a | b | c
+---+----------+-------------------
+ 1 | abc_updt | rls_regress_user1
+ 2 | bcd_updt | rls_regress_user1
+ 5 | efg_updt | rls_regress_user1
+ 6 | fgh_updt | rls_regress_user1
+ 8 | fgh_updt | rls_regress_user2
+ 4 | def_updt | rls_regress_user2
(6 rows)
- SET SESSION AUTHORIZATION rls_regress_user2;
+ SET SESSION AUTHORIZATION regress_rls_carol;
SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC;
-NOTICE: f_leak => cde
-NOTICE: f_leak => fgh
-NOTICE: f_leak => bcd_updt
-NOTICE: f_leak => def_updt
-NOTICE: f_leak => fgh_updt
-NOTICE: f_leak => fgh_updt
a | b | c
---+----------+-------------------
- 2 | bcd_updt | rls_regress_user1
- 3 | cde | rls_regress_user2
- 4 | def_updt | rls_regress_user2
- 6 | fgh_updt | rls_regress_user1
- 7 | fgh | rls_regress_user2
- 8 | fgh_updt | rls_regress_user2
+ 2 | bcd_updt | regress_rls_bob
+ 3 | cde | regress_rls_carol
+ 4 | def_updt | regress_rls_carol
+ 6 | fgh_updt | regress_rls_bob
+ 7 | fgh | regress_rls_carol
+ 8 | fgh_updt | regress_rls_carol
(6 rows)
UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *;
INSERT INTO y2 (SELECT x, md5(x::text) FROM generate_series(0,20) x);
CREATE POLICY p2 ON y2 USING (a % 3 = 0);
CREATE POLICY p3 ON y2 USING (a % 4 = 0);
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM y2 WHERE f_leak(b);
-NOTICE: f_leak => cfcd208495d565ef66e7dff9f98764da
-NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c
-NOTICE: f_leak => eccbc87e4b5ce2fe28308fd9f2a7baf3
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
-NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d
-NOTICE: f_leak => 45c48cce2e2d7fbdea1afc51c7c6ad26
-NOTICE: f_leak => d3d9446802a44259755d38e6d163e820
-NOTICE: f_leak => c20ad4d76fe97759aa27a0c99bff6710
-NOTICE: f_leak => aab3238922bcc25a6f606eb525ffdc56
-NOTICE: f_leak => 9bf31c7ff062936a96d3c8bd1f8f2ff3
-NOTICE: f_leak => c74d97b01eae257e44aa9d5bade97baf
-NOTICE: f_leak => 6f4922f45568161a8cdf4ad2299f6d23
-NOTICE: f_leak => 98f13708210194c475687be6106a3b84
a | b
----+----------------------------------
- 0 | cfcd208495d565ef66e7dff9f98764da
2 | c81e728d9d4c2f636f067f89cc14862c
- 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3
- 4 | a87ff679a2f3e71d9181a67b7542122c
6 | 1679091c5a880faf6fb5e6087eb1b2dc
8 | c9f0f895fb98ab9159f51fd0297e236d
9 | 45c48cce2e2d7fbdea1afc51c7c6ad26
(14 rows)
EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak('abc');
- QUERY PLAN
----------------------------------------------------------------------------------------
- Seq Scan on y2
- Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)))
-(2 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on y2
+ Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)))
+(3 rows)
CREATE TABLE test_qual_pushdown (
- abc text
+ abc text
);
INSERT INTO test_qual_pushdown VALUES ('abc'),('def');
SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc);
DROP TABLE t1 CASCADE;
RESET client_min_messages;
CREATE TABLE t1 (a integer);
- GRANT SELECT ON t1 TO rls_regress_user1, rls_regress_user2;
- CREATE POLICY p1 ON t1 TO rls_regress_user1 USING ((a % 2) = 0);
- CREATE POLICY p2 ON t1 TO rls_regress_user2 USING ((a % 4) = 0);
+ GRANT SELECT ON t1 TO regress_rls_bob, regress_rls_carol;
+ CREATE POLICY p1 ON t1 TO regress_rls_bob USING ((a % 2) = 0);
+ CREATE POLICY p2 ON t1 TO regress_rls_carol USING ((a % 4) = 0);
ALTER TABLE t1 ENABLE ROW LEVEL SECURITY;
--- Prepare as regress_rls_bob
-SET ROLE regress_rls_bob;
+-- Prepare as rls_regress_user1
+SET ROLE rls_regress_user1;
PREPARE role_inval AS SELECT * FROM t1;
-- Check plan
EXPLAIN (COSTS OFF) EXECUTE role_inval;
- QUERY PLAN
--------------------------
- Seq Scan on t1
- Filter: ((a % 2) = 0)
-(2 rows)
+ QUERY PLAN
+----------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Seq Scan on t1
+ Filter: ((a % 2) = 0)
+(4 rows)
--- Change to regress_rls_carol
-SET ROLE regress_rls_carol;
+-- Change to rls_regress_user2
+SET ROLE rls_regress_user2;
-- Check plan - should be different
EXPLAIN (COSTS OFF) EXECUTE role_inval;
- QUERY PLAN
--------------------------
- Seq Scan on t1
- Filter: ((a % 4) = 0)
-(2 rows)
+ QUERY PLAN
+----------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Seq Scan on t1
+ Filter: ((a % 4) = 0)
+(4 rows)
+
+-- Change back to rls_regress_user1
+SET ROLE rls_regress_user1;
+-- Check plan - should be back to original
+EXPLAIN (COSTS OFF) EXECUTE role_inval;
+ QUERY PLAN
+----------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Seq Scan on t1
+ Filter: ((a % 2) = 0)
+(4 rows)
+ -- Change back to regress_rls_bob
+ SET ROLE regress_rls_bob;
+ -- Check plan - should be back to original
+ EXPLAIN (COSTS OFF) EXECUTE role_inval;
+ QUERY PLAN
+ -------------------------
+ Seq Scan on t1
+ Filter: ((a % 2) = 0)
+ (2 rows)
+
--
-- CTE and RLS
--
CREATE TABLE t1 (a integer, b text);
CREATE POLICY p1 ON t1 USING (a % 2 = 0);
ALTER TABLE t1 ENABLE ROW LEVEL SECURITY;
- GRANT ALL ON t1 TO rls_regress_user1;
+ GRANT ALL ON t1 TO regress_rls_bob;
INSERT INTO t1 (SELECT x, md5(x::text) FROM generate_series(0,20) x);
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
WITH cte1 AS (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1;
-NOTICE: f_leak => cfcd208495d565ef66e7dff9f98764da
-NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
-NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d
-NOTICE: f_leak => d3d9446802a44259755d38e6d163e820
-NOTICE: f_leak => c20ad4d76fe97759aa27a0c99bff6710
-NOTICE: f_leak => aab3238922bcc25a6f606eb525ffdc56
-NOTICE: f_leak => c74d97b01eae257e44aa9d5bade97baf
-NOTICE: f_leak => 6f4922f45568161a8cdf4ad2299f6d23
-NOTICE: f_leak => 98f13708210194c475687be6106a3b84
a | b
----+----------------------------------
- 0 | cfcd208495d565ef66e7dff9f98764da
2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
6 | 1679091c5a880faf6fb5e6087eb1b2dc
8 | c9f0f895fb98ab9159f51fd0297e236d
- 10 | d3d9446802a44259755d38e6d163e820
12 | c20ad4d76fe97759aa27a0c99bff6710
+ 0 | cfcd208495d565ef66e7dff9f98764da
+ 4 | a87ff679a2f3e71d9181a67b7542122c
+ 10 | d3d9446802a44259755d38e6d163e820
14 | aab3238922bcc25a6f606eb525ffdc56
16 | c74d97b01eae257e44aa9d5bade97baf
18 | 6f4922f45568161a8cdf4ad2299f6d23
SELECT id, author, message FROM comment JOIN blog ON id = blog_id;
id | author | message
----+--------+-------------
- 4 | alice | insane!
2 | bob | who did it?
+ 4 | alice | insane!
(2 rows)
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
CREATE POLICY comment_1 ON comment USING (blog_id < 4);
ALTER TABLE comment ENABLE ROW LEVEL SECURITY;
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
-- Check RLS JOIN RLS
SELECT id, author, message FROM blog JOIN comment ON id = blog_id;
id | author | message
16 | c74d97b01eae257e44aa9d5bade97baf
18 | 6f4922f45568161a8cdf4ad2299f6d23
20 | 98f13708210194c475687be6106a3b84
- 20 | Success
-(22 rows)
+(21 rows)
EXPLAIN (COSTS OFF) SELECT * FROM t1;
- QUERY PLAN
-----------------
- Seq Scan on t1
-(1 row)
+ QUERY PLAN
+----------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Seq Scan on t1
+(3 rows)
-- Check that default deny does not apply to table owner.
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
SELECT * FROM t1;
a | b
----+----------------------------------
16 | c74d97b01eae257e44aa9d5bade97baf
18 | 6f4922f45568161a8cdf4ad2299f6d23
20 | 98f13708210194c475687be6106a3b84
- 20 | Success
-(22 rows)
+(21 rows)
EXPLAIN (COSTS OFF) SELECT * FROM t1;
- QUERY PLAN
-----------------
- Seq Scan on t1
-(1 row)
+ QUERY PLAN
+----------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Seq Scan on t1
+(3 rows)
-- Check that default deny applies to non-owner/non-superuser when RLS on.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SET row_security TO ON;
SELECT * FROM t1;
a | b
(0 rows)
EXPLAIN (COSTS OFF) SELECT * FROM t1;
- QUERY PLAN
---------------------------
- Result
- One-Time Filter: false
-(2 rows)
+ QUERY PLAN
+----------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Result
+ One-Time Filter: false
+(4 rows)
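-- "One-Time Filter: false" is default deny made visible: RLS is enabled on t1
-- but no policy grants regress_rls_bob access, so the security qual reduces
-- to constant false and each datanode returns no rows without scanning t1.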
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM t1;
a | b
---+---
SET row_security TO ON;
COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok
0,cfcd208495d565ef66e7dff9f98764da
+ 1,c4ca4238a0b923820dcc509a6f75849b
2,c81e728d9d4c2f636f067f89cc14862c
+ 3,eccbc87e4b5ce2fe28308fd9f2a7baf3
4,a87ff679a2f3e71d9181a67b7542122c
+ 5,e4da3b7fbbce2345d7772b0674a318d5
6,1679091c5a880faf6fb5e6087eb1b2dc
+ 7,8f14e45fceea167a5a36dedd4bea2543
8,c9f0f895fb98ab9159f51fd0297e236d
+ 9,45c48cce2e2d7fbdea1afc51c7c6ad26
10,d3d9446802a44259755d38e6d163e820
-- Check COPY TO as user without permissions.
-SET SESSION AUTHORIZATION regress_rls_carol;
+SET SESSION AUTHORIZATION rls_regress_user2;
SET row_security TO OFF;
- COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - insufficient to bypass rls
- ERROR: insufficient privilege to bypass row security.
+ COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS
+ ERROR: query would be affected by row-level security policy for table "copy_t"
SET row_security TO ON;
COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - permission denied
ERROR: permission denied for relation copy_t
SET row_security TO ON;
COPY copy_t FROM STDIN; --ok
-- Check COPY FROM as user with permissions.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
SET row_security TO OFF;
- COPY copy_t FROM STDIN; --fail - insufficient privilege to bypass rls.
- ERROR: insufficient privilege to bypass row security.
+ COPY copy_t FROM STDIN; --fail - would be affected by RLS.
+ ERROR: query would be affected by row-level security policy for table "copy_t"
SET row_security TO ON;
COPY copy_t FROM STDIN; --fail - COPY FROM not supported by RLS.
-ERROR: COPY FROM not supported with row-level security
-HINT: Use INSERT statements instead.
--- Check COPY FROM as user with permissions and BYPASSRLS
-SET SESSION AUTHORIZATION regress_rls_exempt_user;
-SET row_security TO ON;
+ERROR: COPY FROM not supported with row level security.
+HINT: Use direct INSERT statements instead.
+-- Check COPY TO as user with permissions and BYPASSRLS
+SET SESSION AUTHORIZATION rls_regress_exempt_user;
+SET row_security TO OFF;
COPY copy_t FROM STDIN; --ok
+SET row_security TO ON;
+COPY copy_t FROM STDIN; --fail - COPY FROM not supported by RLS.
+ERROR: COPY FROM not supported with row level security.
+HINT: Use direct INSERT statements instead.
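-- The HINT's workaround, sketched with a hypothetical staging table: load the
-- raw rows first, then funnel them through the policies with INSERT.
-- CREATE TEMP TABLE copy_stage (LIKE copy_t);
-- COPY copy_stage FROM STDIN;
-- INSERT INTO copy_t SELECT * FROM copy_stage;  -- rows pass RLS checks here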
-- Check COPY FROM as user without permissions.
- SET SESSION AUTHORIZATION rls_regress_user2;
+ SET SESSION AUTHORIZATION regress_rls_carol;
SET row_security TO OFF;
COPY copy_t FROM STDIN; --fail - permission denied.
ERROR: permission denied for relation copy_t
RESET SESSION AUTHORIZATION;
-- Suppress NOTICE messages when doing a cascaded drop.
SET client_min_messages TO 'warning';
- DROP SCHEMA rls_regress_schema CASCADE;
+ DROP SCHEMA regress_rls_schema CASCADE;
RESET client_min_messages;
-DROP USER regress_rls_alice;
-DROP USER regress_rls_bob;
-DROP USER regress_rls_carol;
-DROP USER regress_rls_exempt_user;
-DROP ROLE regress_rls_group1;
-DROP ROLE regress_rls_group2;
+DROP USER rls_regress_user0;
+DROP USER rls_regress_user1;
+DROP USER rls_regress_user2;
+DROP USER rls_regress_exempt_user;
+DROP ROLE rls_regress_group1;
+DROP ROLE rls_regress_group2;
-- Arrange to have a few policies left over, for testing
-- pg_dump/pg_restore
-CREATE SCHEMA regress_rls_schema;
+CREATE SCHEMA rls_regress_schema;
CREATE TABLE rls_tbl (c1 int);
ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY;
CREATE POLICY p1 ON rls_tbl USING (c1 > 5);
SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0);
id
----
+ 3
+ 4
+ 5
6
- 7
8
- 9
- 7
- (4 rows)
+ (6 rows)
SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (50) REPEATABLE (0);
id
0
(1 row)
+ -- check behavior during rescans, as well as correct handling of min/max pct
+ select * from
+ (values (0),(100)) v(pct),
+ lateral (select count(*) from tenk1 tablesample bernoulli (pct)) ss;
+ pct | count
+ -----+-------
+ 0 | 0
+ 100 | 10000
+ (2 rows)
+
+ select * from
+ (values (0),(100)) v(pct),
+ lateral (select count(*) from tenk1 tablesample system (pct)) ss;
+ pct | count
+ -----+-------
+ 0 | 0
+ 100 | 10000
+ (2 rows)
+
+ explain (costs off)
+ select pct, count(unique1) from
+ (values (0),(100)) v(pct),
+ lateral (select * from tenk1 tablesample bernoulli (pct)) ss
+ group by pct;
+ QUERY PLAN
+ --------------------------------------------------------
+ HashAggregate
+ Group Key: "*VALUES*".column1
+ -> Nested Loop
+ -> Values Scan on "*VALUES*"
+ -> Sample Scan on tenk1
+ Sampling: bernoulli ("*VALUES*".column1)
+ (6 rows)
+
+ select pct, count(unique1) from
+ (values (0),(100)) v(pct),
+ lateral (select * from tenk1 tablesample bernoulli (pct)) ss
+ group by pct;
+ pct | count
+ -----+-------
+ 100 | 10000
+ (1 row)
+
+ select pct, count(unique1) from
+ (values (0),(100)) v(pct),
+ lateral (select * from tenk1 tablesample system (pct)) ss
+ group by pct;
+ pct | count
+ -----+-------
+ 100 | 10000
+ (1 row)
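-- What the extremes above verify: BERNOULLI samples each row independently at
-- pct percent while SYSTEM samples whole pages, and both must re-read the
-- lateral pct parameter on every rescan, yielding exactly 0 rows at 0 and all
-- 10000 rows at 100.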
+
++-- check that collations get assigned within the tablesample arguments
++SELECT count(*) FROM test_tablesample TABLESAMPLE bernoulli (('1'::text < '0'::text)::int);
++ count
++-------
++ 0
++(1 row)
++
+-- check behavior during rescans, as well as correct handling of min/max pct
+select * from
+ (values (0),(100)) v(pct),
+ lateral (select count(*) from tenk1 tablesample bernoulli (pct)) ss;
+ pct | count
+-----+-------
+ 0 | 0
+ 100 | 0
+(2 rows)
+
+select * from
+ (values (0),(100)) v(pct),
+ lateral (select count(*) from tenk1 tablesample system (pct)) ss;
+ pct | count
+-----+-------
+ 0 | 0
+ 100 | 0
+(2 rows)
+
+explain (costs off)
+select pct, count(unique1) from
+ (values (0),(100)) v(pct),
+ lateral (select * from tenk1 tablesample bernoulli (pct)) ss
+ group by pct;
+ QUERY PLAN
+-----------------------------------------------------------------------
+ HashAggregate
+ Group Key: "*VALUES*".column1
+ -> Nested Loop
+ -> Values Scan on "*VALUES*"
+ -> Materialize
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Sample Scan on tenk1
+ Sampling: bernoulli ("*VALUES*".column1)
+(8 rows)
+
+select pct, count(unique1) from
+ (values (0),(100)) v(pct),
+ lateral (select * from tenk1 tablesample bernoulli (pct)) ss
+ group by pct;
+ pct | count
+-----+-------
+(0 rows)
+
+select pct, count(unique1) from
+ (values (0),(100)) v(pct),
+ lateral (select * from tenk1 tablesample system (pct)) ss
+ group by pct;
+ pct | count
+-----+-------
+(0 rows)
+
-- errors
SELECT id FROM test_tablesample TABLESAMPLE FOOBAR (1);
ERROR: tablesample method foobar does not exist
| Wed Dec 31 17:32:01 1997
| Fri Dec 31 17:32:01 1999
| Sat Jan 01 17:32:01 2000
+ | Wed Mar 15 02:14:05 2000
+ | Wed Mar 15 03:14:04 2000
+ | Wed Mar 15 08:14:01 2000
+ | Wed Mar 15 12:14:03 2000
+ | Wed Mar 15 13:14:02 2000
| Sun Dec 31 17:32:01 2000
| Mon Jan 01 17:32:01 2001
+ | Sat Sep 22 18:19:20 2001
+ | Sat Feb 16 17:32:01 2097
+ | infinity
(65 rows)
+ -- Check behavior at the lower boundary of the timestamp range
+ SELECT '4714-11-24 00:00:00 BC'::timestamp;
+ timestamp
+ -----------------------------
+ Mon Nov 24 00:00:00 4714 BC
+ (1 row)
+
+ SELECT '4714-11-23 23:59:59 BC'::timestamp; -- out of range
+ ERROR: timestamp out of range: "4714-11-23 23:59:59 BC"
+ LINE 1: SELECT '4714-11-23 23:59:59 BC'::timestamp;
+ ^
+ -- The upper boundary differs between integer and float timestamps, so no check
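-- The lower bound is Julian day 0, which can be cross-checked directly, since
-- to_char's J field prints the Julian day number:
-- SELECT to_char('4714-11-24 00:00:00 BC'::timestamp, 'J');  -- expect 0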
-- Demonstrate functions and operators
SELECT '' AS "48", d1 FROM TIMESTAMP_TBL
- WHERE d1 > timestamp without time zone '1997-01-02';
+ WHERE d1 > timestamp without time zone '1997-01-02' ORDER BY d1;
48 | d1
----+----------------------------
- | infinity
+ | Thu Jan 02 03:04:05 1997
+ | Mon Feb 10 17:32:00 1997
| Mon Feb 10 17:32:01 1997
| Mon Feb 10 17:32:01 1997
- | Mon Feb 10 17:32:02 1997
- | Mon Feb 10 17:32:01.4 1997
- | Mon Feb 10 17:32:01.5 1997
- | Mon Feb 10 17:32:01.6 1997
- | Thu Jan 02 03:04:05 1997
| Mon Feb 10 17:32:01 1997
| Mon Feb 10 17:32:01 1997
| Mon Feb 10 17:32:01 1997
| 1,997 1997 997 97 7 20 1 02 07 045 14 6 2450494
| 1,997 1997 997 97 7 20 1 02 07 046 15 7 2450495
| 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
+ | 0,097 0097 097 97 7 -01 1 02 07 047 16 3 1686042
+ | 0,097 0097 097 97 7 01 1 02 07 047 16 7 1756536
+ | 0,597 0597 597 97 7 06 1 02 07 047 16 5 1939157
+ | 1,097 1097 097 97 7 11 1 02 07 047 16 3 2121778
+ | 1,697 1697 697 97 7 17 1 02 07 047 16 7 2340924
+ | 1,797 1797 797 97 7 18 1 02 07 047 16 5 2377448
+ | 1,897 1897 897 97 7 19 1 02 07 047 16 3 2413972
| 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
- | 2,097 2097 097 97 7 21 1 02 07 047 16 7 2487021
- | 1,996 1996 996 96 6 20 1 02 09 059 28 4 2450142
- | 1,996 1996 996 96 6 20 1 02 09 060 29 5 2450143
- | 1,996 1996 996 96 6 20 1 03 09 061 01 6 2450144
- | 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448
- | 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449
- | 1,997 1997 997 97 7 20 1 01 01 001 01 4 2450450
| 1,997 1997 997 97 7 20 1 02 09 059 28 6 2450508
| 1,997 1997 997 97 7 20 1 03 09 060 01 7 2450509
+ | 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
+ | 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
| 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813
| 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814
| 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544
| Wed Dec 31 17:32:01 1997 PST
| Fri Dec 31 17:32:01 1999 PST
| Sat Jan 01 17:32:01 2000 PST
+ | Wed Mar 15 01:14:05 2000 PST
+ | Wed Mar 15 02:14:03 2000 PST
+ | Wed Mar 15 03:14:04 2000 PST
+ | Wed Mar 15 04:14:02 2000 PST
+ | Wed Mar 15 08:14:01 2000 PST
| Sun Dec 31 17:32:01 2000 PST
| Mon Jan 01 17:32:01 2001 PST
+ | Sat Sep 22 18:19:20 2001 PDT
+ | Sat Feb 16 17:32:01 2097 PST
+ | infinity
(66 rows)
+ -- Check behavior at the lower boundary of the timestamp range
+ SELECT '4714-11-24 00:00:00+00 BC'::timestamptz;
+ timestamptz
+ ---------------------------------
+ Sun Nov 23 16:00:00 4714 PST BC
+ (1 row)
+
+ SELECT '4714-11-23 16:00:00-08 BC'::timestamptz;
+ timestamptz
+ ---------------------------------
+ Sun Nov 23 16:00:00 4714 PST BC
+ (1 row)
+
+ SELECT 'Sun Nov 23 16:00:00 4714 PST BC'::timestamptz;
+ timestamptz
+ ---------------------------------
+ Sun Nov 23 16:00:00 4714 PST BC
+ (1 row)
+
+ SELECT '4714-11-23 23:59:59+00 BC'::timestamptz; -- out of range
+ ERROR: timestamp out of range: "4714-11-23 23:59:59+00 BC"
+ LINE 1: SELECT '4714-11-23 23:59:59+00 BC'::timestamptz;
+ ^
+ -- The upper boundary differs between integer and float timestamps, so no check
-- Demonstrate functions and operators
SELECT '' AS "48", d1 FROM TIMESTAMPTZ_TBL
- WHERE d1 > timestamp with time zone '1997-01-02';
+ WHERE d1 > timestamp with time zone '1997-01-02' ORDER BY d1;
48 | d1
----+--------------------------------
- | infinity
+ | Thu Jan 02 03:04:05 1997 PST
+ | Mon Feb 10 09:32:01 1997 PST
+ | Mon Feb 10 09:32:01 1997 PST
+ | Mon Feb 10 09:32:01 1997 PST
+ | Mon Feb 10 14:32:01 1997 PST
+ | Mon Feb 10 17:32:00 1997 PST
| Mon Feb 10 17:32:01 1997 PST
| Mon Feb 10 17:32:01 1997 PST
- | Mon Feb 10 17:32:02 1997 PST
- | Mon Feb 10 17:32:01.4 1997 PST
- | Mon Feb 10 17:32:01.5 1997 PST
- | Mon Feb 10 17:32:01.6 1997 PST
- | Thu Jan 02 03:04:05 1997 PST
| Mon Feb 10 17:32:01 1997 PST
| Mon Feb 10 17:32:01 1997 PST
| Mon Feb 10 17:32:01 1997 PST
| 1,997 1997 997 97 7 20 1 02 07 045 14 6 2450494
| 1,997 1997 997 97 7 20 1 02 07 046 15 7 2450495
| 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
+ | 0,097 0097 097 97 7 -01 1 02 07 047 16 3 1686042
+ | 0,097 0097 097 97 7 01 1 02 07 047 16 7 1756536
+ | 0,597 0597 597 97 7 06 1 02 07 047 16 5 1939157
+ | 1,097 1097 097 97 7 11 1 02 07 047 16 3 2121778
+ | 1,697 1697 697 97 7 17 1 02 07 047 16 7 2340924
+ | 1,797 1797 797 97 7 18 1 02 07 047 16 5 2377448
+ | 1,897 1897 897 97 7 19 1 02 07 047 16 3 2413972
| 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
- | 2,097 2097 097 97 7 21 1 02 07 047 16 7 2487021
- | 1,996 1996 996 96 6 20 1 02 09 059 28 4 2450142
- | 1,996 1996 996 96 6 20 1 02 09 060 29 5 2450143
- | 1,996 1996 996 96 6 20 1 03 09 061 01 6 2450144
- | 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448
- | 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449
- | 1,997 1997 997 97 7 20 1 01 01 001 01 4 2450450
| 1,997 1997 997 97 7 20 1 02 09 059 28 6 2450508
| 1,997 1997 997 97 7 20 1 03 09 060 01 7 2450509
+ | 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
+ | 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
+ | 1,997 1997 997 97 7 20 3 07 28 191 10 5 2450640
| 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813
| 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814
| 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544
| 1998 998 98 8 1 3 3
| 1999 999 99 9 52 362 5
| 1999 999 99 9 52 363 6
+ | 2000 0 0 0 11 73 3
+ | 2000 0 0 0 11 73 3
+ | 2000 0 0 0 11 73 3
+ | 2000 0 0 0 11 73 3
+ | 2000 0 0 0 11 73 3
| 2000 0 0 0 52 364 7
| 2001 1 1 1 1 1 1
+ | 2001 1 1 1 38 265 6
+ | 2097 97 97 7 7 48 6
+ |
(66 rows)
+ -- Check OF with various zone offsets, particularly fractional hours
+ SET timezone = '00:00';
+ SELECT to_char(now(), 'OF');
+ to_char
+ ---------
+ +00
+ (1 row)
+
+ SET timezone = '+02:00';
+ SELECT to_char(now(), 'OF');
+ to_char
+ ---------
+ -02
+ (1 row)
+
+ SET timezone = '-13:00';
+ SELECT to_char(now(), 'OF');
+ to_char
+ ---------
+ +13
+ (1 row)
+
+ SET timezone = '-00:30';
+ SELECT to_char(now(), 'OF');
+ to_char
+ ---------
+ +00:30
+ (1 row)
+
+ SET timezone = '00:30';
+ SELECT to_char(now(), 'OF');
+ to_char
+ ---------
+ -00:30
+ (1 row)
+
+ SET timezone = '-04:30';
+ SELECT to_char(now(), 'OF');
+ to_char
+ ---------
+ +04:30
+ (1 row)
+
+ SET timezone = '04:30';
+ SELECT to_char(now(), 'OF');
+ to_char
+ ---------
+ -04:30
+ (1 row)
+
+ RESET timezone;
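-- The sign flips above are expected: a bare offset string handed to SET
-- timezone is read in the POSIX convention (positive means west of
-- Greenwich), while OF prints the ISO convention (positive means east), so
-- '-13:00' comes back as +13 and '04:30' as -04:30.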
CREATE TABLE TIMESTAMPTZ_TST (a int , b timestamptz);
-- Test year field value with len > 4
INSERT INTO TIMESTAMPTZ_TST VALUES(1, 'Sat Mar 12 23:58:48 1000 IST');
-1 | Row -1
0 | Row 0
1 | Row 1
- 2 | Updated row 2
+ 2 | Row 2
(5 rows)
-EXPLAIN (costs off)
+EXPLAIN (costs off, nodes off)
UPDATE rw_view1 v SET bb='Updated row 2' WHERE rw_view1_aa(v)=2
RETURNING rw_view1_aa(v), v.bb;
- QUERY PLAN
---------------------------------------------------
- Update on base_tbl
- -> Index Scan using base_tbl_pkey on base_tbl
- Index Cond: (a = 2)
-(3 rows)
-
+ERROR: function rw_view1_aa(rw_view1) does not exist
+LINE 2: UPDATE rw_view1 v SET bb='Updated row 2' WHERE rw_view1_aa(v...
+ ^
+HINT: No function matches the given name and argument types. You might need to add explicit type casts.
DROP TABLE base_tbl CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to view rw_view1
-drop cascades to function rw_view1_aa(rw_view1)
+NOTICE: drop cascades to view rw_view1
-- permissions checks
- CREATE USER view_user1;
- CREATE USER view_user2;
- SET SESSION AUTHORIZATION view_user1;
- CREATE TABLE base_tbl(a int, b text, c float) DISTRIBUTE BY REPLICATION;
+ CREATE USER regress_view_user1;
+ CREATE USER regress_view_user2;
+ SET SESSION AUTHORIZATION regress_view_user1;
+ CREATE TABLE base_tbl(a int, b text, c float);
INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl;
INSERT INTO rw_view1 VALUES ('Row 2', 2.0, 2);
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to view rw_view1
drop cascades to view rw_view2
- DROP USER view_user1;
- DROP USER view_user2;
+ DROP USER regress_view_user1;
+ DROP USER regress_view_user2;
-- column defaults
-CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified', c serial);
+CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified', c serial) DISTRIBUTE BY REPLICATION;
INSERT INTO base_tbl VALUES (1, 'Row 1');
INSERT INTO base_tbl VALUES (2, 'Row 2');
INSERT INTO base_tbl VALUES (3);
SELECT * FROM base_tbl b
WHERE EXISTS(SELECT 1 FROM ref_tbl r WHERE r.a = b.a)
WITH CHECK OPTION;
-INSERT INTO rw_view1 VALUES (5); -- ok
+--INSERT INTO rw_view1 VALUES (5); -- ok
INSERT INTO rw_view1 VALUES (15); -- should fail
- ERROR: new row violates WITH CHECK OPTION for "rw_view1"
+ ERROR: new row violates check option for view "rw_view1"
DETAIL: Failing row contains (15).
UPDATE rw_view1 SET a = a + 5; -- ok
UPDATE rw_view1 SET a = a + 5; -- should fail
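-- The asymmetry is the point of WITH CHECK OPTION: updated rows must still
-- satisfy the view's EXISTS predicate against ref_tbl afterwards, so the
-- first a = a + 5 keeps every row inside the view while the second pushes
-- rows out of it and is rejected.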
INSERT INTO vaccluster VALUES (1), (2);
ANALYZE vaccluster;
ERROR: ANALYZE cannot be executed from VACUUM or ANALYZE
-CONTEXT: SQL function "do_analyze" statement 1
-SQL function "wrap_do_analyze" statement 1
VACUUM FULL pg_am;
VACUUM FULL pg_class;
-VACUUM FULL pg_database;
+VACUUM FULL pg_catalog.pg_database;
VACUUM FULL vaccluster;
ERROR: ANALYZE cannot be executed from VACUUM or ANALYZE
-CONTEXT: SQL function "do_analyze" statement 1
-SQL function "wrap_do_analyze" statement 1
VACUUM FULL vactst;
+ VACUUM (DISABLE_PAGE_SKIPPING) vaccluster;
DROP TABLE vaccluster;
DROP TABLE vactst;
-- lo_creat(mode integer) returns oid
-- The mode arg to lo_creat is unused, some vestigial holdover from ancient times
-- returns the large object id
-INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42);
+INSERT INTO lotest_stash_values (loid) VALUES( lo_creat(42) );
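+-- Illustration only: since the mode argument is ignored, any value works;
+-- lo_creat(-1) is the conventional spelling, and lo_create(0) is the newer
+-- variant that lets the server pick the OID:
+--   SELECT lo_creat(-1);
+--   SELECT lo_create(0);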
-- Test ALTER LARGE OBJECT
- CREATE ROLE regresslo;
+ CREATE ROLE regress_lo_user;
DO $$
BEGIN
EXECUTE 'ALTER LARGE OBJECT ' || (select loid from lotest_stash_values)
(1 row)
PREPARE selectsource(int) AS SELECT $1;
- CREATE TABLE testschema.asexecute TABLESPACE testspace
+ CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace
AS EXECUTE selectsource(2);
+ERROR: CREATE TABLE AS EXECUTE not yet supported
SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
where c.reltablespace = t.oid AND c.relname = 'asexecute';
- relname | spcname
------------+------------------
- asexecute | regress_tblspace
-(1 row)
+ relname | spcname
+---------+---------
+(0 rows)
-- index
- CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE testspace;
+ CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace;
SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
where c.reltablespace = t.oid AND c.relname = 'foo_idx';
- relname | spcname
- ---------+-----------
- foo_idx | testspace
+ relname | spcname
+ ---------+------------------
+ foo_idx | regress_tblspace
(1 row)
-- let's try moving a table from one place to another
(1 row)
-- Will fail with bad path
- CREATE TABLESPACE badspace LOCATION '/no/such/location';
+ CREATE TABLESPACE regress_badspace LOCATION '/no/such/location';
ERROR: directory "/no/such/location" does not exist
-- No such tablespace
- CREATE TABLE bar (i int) TABLESPACE nosuchspace;
- ERROR: tablespace "nosuchspace" does not exist
+ CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace;
+ ERROR: tablespace "regress_nosuchspace" does not exist
-- Fail, not empty
- DROP TABLESPACE testspace;
- ERROR: tablespace "testspace" is not empty
- CREATE ROLE tablespace_testuser1 login;
- CREATE ROLE tablespace_testuser2 login;
- ALTER TABLESPACE testspace OWNER TO tablespace_testuser1;
- SET SESSION ROLE tablespace_testuser2;
- CREATE TABLE tablespace_table (i int) TABLESPACE testspace; -- fail
- ERROR: permission denied for tablespace testspace
+ DROP TABLESPACE regress_tblspace;
+ ERROR: tablespace "regress_tblspace" is not empty
+ CREATE ROLE regress_tablespace_user1 login;
+ CREATE ROLE regress_tablespace_user2 login;
+ ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1;
+ SET SESSION ROLE regress_tablespace_user2;
+ CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail
+ ERROR: permission denied for tablespace regress_tblspace
RESET ROLE;
- ALTER TABLESPACE testspace RENAME TO testspace_renamed;
- ALTER TABLE ALL IN TABLESPACE testspace_renamed SET TABLESPACE pg_default;
- ALTER INDEX ALL IN TABLESPACE testspace_renamed SET TABLESPACE pg_default;
+ ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed;
+ ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+ ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
-- Should show notice that nothing was done
- ALTER TABLE ALL IN TABLESPACE testspace_renamed SET TABLESPACE pg_default;
- NOTICE: no matching relations in tablespace "testspace_renamed" found
+ ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+ NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found
-- Should succeed
- DROP TABLESPACE testspace_renamed;
+ DROP TABLESPACE regress_tblspace_renamed;
DROP SCHEMA testschema CASCADE;
-NOTICE: drop cascades to 4 other objects
+NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table testschema.foo
drop cascades to table testschema.asselect
-drop cascades to table testschema.asexecute
drop cascades to table testschema.atable
- DROP ROLE tablespace_testuser1;
- DROP ROLE tablespace_testuser2;
+ DROP ROLE regress_tablespace_user1;
+ DROP ROLE regress_tablespace_user2;
# ----------
# Another group of parallel tests
# ----------
- test: select_views portals_p2 foreign_key cluster dependency guc bitmapops combocid tsearch tsdicts foreign_data window xmlmap functional_deps json jsonb indirect_toast equivclass
-test: select_views portals_p2 foreign_key cluster dependency guc bitmapops combocid tsearch tsdicts foreign_data window xmlmap functional_deps advisory_lock json jsonb json_encoding indirect_toast equivclass
++test: select_views portals_p2 foreign_key cluster dependency guc bitmapops combocid tsearch tsdicts foreign_data window xmlmap functional_deps json jsonb json_encoding indirect_toast equivclass
+
+# ----------
+# As XL uses advisory locks internally, run this test separately.
+# ----------
+test: advisory_lock
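+# (Illustration: an advisory-lock session such as
+#    SELECT pg_advisory_lock(1); ... SELECT pg_advisory_unlock(1);
+# can block, or be blocked by, the advisory locks XL takes internally, so the
+# test cannot safely share a parallel group.)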
+# Separated out, as a similar table foo is created in other tests below.
+test: rangefuncs
# ----------
# Another group of parallel tests
# NB: temp.sql does a reconnect which transiently uses 2 connections,
if (temp_instance)
{
+#ifndef PGXC
FILE *pg_conf;
+#endif
+ const char *env_wait;
+ int wait_seconds;
/*
* Prepare the temp instance
progname, strerror(errno));
exit(2);
}
+#endif
/*
- * Wait till postmaster is able to accept connections (normally only a
- * second or so, but Cygwin is reportedly *much* slower). Don't wait
- * forever, however.
+ * Wait till postmaster is able to accept connections; normally this
+ * is only a second or so, but Cygwin is reportedly *much* slower, and
+ * test builds using Valgrind or similar tools might be too. Hence,
+ * allow the default timeout of 60 seconds to be overridden from the
+ * PGCTLTIMEOUT environment variable.
*/
- for (i = 0; i < 60; i++)
+ env_wait = getenv("PGCTLTIMEOUT");
+ if (env_wait != NULL)
+ {
+ wait_seconds = atoi(env_wait);
+ if (wait_seconds <= 0)
+ wait_seconds = 60;
+ }
+ else
+ wait_seconds = 60;
+
+ for (i = 0; i < wait_seconds; i++)
{
+
+#ifdef PGXC
+ /* Done if psql succeeds for each node */
+ if (check_node_running(PGXC_COORD_1) &&
+ check_node_running(PGXC_COORD_2) &&
+ check_node_running(PGXC_DATANODE_1) &&
+ check_node_running(PGXC_DATANODE_2))
+ break;
+
+ /* Check node failure */
+ check_node_fail(PGXC_COORD_1);
+ check_node_fail(PGXC_COORD_2);
+ check_node_fail(PGXC_DATANODE_1);
+ check_node_fail(PGXC_DATANODE_2);
+#else
/* Done if psql succeeds */
if (system(buf2) == 0)
break;
pg_usleep(1000000L);
}
- if (i >= 60)
+ if (i >= wait_seconds)
{
- fprintf(stderr, _("\n%s: postmaster did not respond within 60 seconds\nExamine %s/log/postmaster.log for the reason\n"), progname, outputdir);
+#ifdef PGXC
+ /* If one node fails, all fail */
+ kill_node(PGXC_COORD_1);
+ kill_node(PGXC_COORD_2);
+ kill_node(PGXC_DATANODE_1);
+ kill_node(PGXC_DATANODE_2);
+#else
+ fprintf(stderr, _("\n%s: postmaster did not respond within %d seconds\nExamine %s/log/postmaster.log for the reason\n"),
+ progname, wait_seconds, outputdir);
/*
* If we get here, the postmaster is probably wedged somewhere in
test: case
test: join
test: aggregates
- test: groupingsets
test: transactions
-ignore: random
test: random
test: portals
test: arrays
--
-- Basic cases
-explain (costs off)
+explain (costs off, nodes off)
select min(unique1) from tenk1;
select min(unique1) from tenk1;
-explain (costs off)
+explain (costs off, nodes off)
select max(unique1) from tenk1;
select max(unique1) from tenk1;
-explain (costs off)
+explain (costs off, nodes off)
select max(unique1) from tenk1 where unique1 < 42;
select max(unique1) from tenk1 where unique1 < 42;
-explain (costs off)
+explain (costs off, nodes off)
select max(unique1) from tenk1 where unique1 > 42;
select max(unique1) from tenk1 where unique1 > 42;
- explain (costs off, nodes off)
+
+ -- the planner may choose a generic aggregate here if parallel query is
+ -- enabled, since that plan will be parallel safe and the "optimized"
+ -- plan, which has almost identical cost, will not be. We want to test
+ -- the optimized plan, so temporarily disable parallel query.
+ begin;
+ set local max_parallel_workers_per_gather = 0;
+ explain (costs off)
select max(unique1) from tenk1 where unique1 > 42000;
select max(unique1) from tenk1 where unique1 > 42000;
+ rollback;
-- multi-column index (uses tenk1_thous_tenthous)
-explain (costs off)
+explain (costs off, nodes off)
select max(tenthous) from tenk1 where thousand = 33;
select max(tenthous) from tenk1 where thousand = 33;
-explain (costs off)
+explain (costs off, nodes off)
select min(tenthous) from tenk1 where thousand = 33;
select min(tenthous) from tenk1 where thousand = 33;
select least_agg(q1,q2) from int8_tbl;
select least_agg(variadic array[q1,q2]) from int8_tbl;
+-- int8 aggregates for distributed tables
+
+CREATE TABLE int8_tbl_aggtest AS SELECT * FROM int8_tbl;
+
+SELECT avg(q1) FROM int8_tbl_aggtest;
+SELECT sum(q1) FROM int8_tbl_aggtest;
+SELECT max(q1) FROM int8_tbl_aggtest;
+SELECT min(q1) FROM int8_tbl_aggtest;
+SELECT stddev_pop(q1) FROM int8_tbl_aggtest;
+SELECT stddev_samp(q1) FROM int8_tbl_aggtest;
+SELECT var_pop(q1) FROM int8_tbl_aggtest;
+SELECT var_samp(q1) FROM int8_tbl_aggtest;
+
+DROP TABLE int8_tbl_aggtest;
+
+ -- test that aggregates with common transition functions share the same state
+ begin work;
+
+ create type avg_state as (total bigint, count bigint);
+
+ create or replace function avg_transfn(state avg_state, n int) returns avg_state as
+ $$
+ declare new_state avg_state;
+ begin
+ raise notice 'avg_transfn called with %', n;
+ if state is null then
+ if n is not null then
+ new_state.total := n;
+ new_state.count := 1;
+ return new_state;
+ end if;
+ return null;
+ elsif n is not null then
+ state.total := state.total + n;
+ state.count := state.count + 1;
+ return state;
+ end if;
+
+ return null;
+ end
+ $$ language plpgsql;
+
+ create function avg_finalfn(state avg_state) returns int4 as
+ $$
+ begin
+ if state is null then
+ return NULL;
+ else
+ return state.total / state.count;
+ end if;
+ end
+ $$ language plpgsql;
+
+ create function sum_finalfn(state avg_state) returns int4 as
+ $$
+ begin
+ if state is null then
+ return NULL;
+ else
+ return state.total;
+ end if;
+ end
+ $$ language plpgsql;
+
+ create aggregate my_avg(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn
+ );
+
+ create aggregate my_sum(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = sum_finalfn
+ );
+
+ -- aggregate state should be shared as aggs are the same.
+ select my_avg(one),my_avg(one) from (values(1),(3)) t(one);
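+ -- (Illustration: sharing is visible through avg_transfn's NOTICE output;
+ -- a shared state raises one NOTICE per input row, while unshared states
+ -- would raise one per row for each aggregate.)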
+
+ -- aggregate state should be shared as transfn is the same for both aggs.
+ select my_avg(one),my_sum(one) from (values(1),(3)) t(one);
+
+ -- shouldn't share states due to the distinctness not matching.
+ select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one);
+
+ -- shouldn't share states due to the filter clause not matching.
+ select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one);
+
+ -- this should not share the state due to different input columns.
+ select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two);
+
+ -- test that aggs with the same sfunc and initcond share the same agg state
+ create aggregate my_sum_init(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = sum_finalfn,
+ initcond = '(10,0)'
+ );
+
+ create aggregate my_avg_init(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn,
+ initcond = '(10,0)'
+ );
+
+ create aggregate my_avg_init2(int4)
+ (
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn,
+ initcond = '(4,0)'
+ );
+
+ -- state should be shared if INITCONDs are matching
+ select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one);
+
+ -- Varying INITCONDs should cause the states not to be shared.
+ select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one);
+
+ rollback;
+
+ -- test aggregate state sharing to ensure it works if one aggregate has a
+ -- finalfn and the other one has none.
+ begin work;
+
+ create or replace function sum_transfn(state int4, n int4) returns int4 as
+ $$
+ declare new_state int4;
+ begin
+ raise notice 'sum_transfn called with %', n;
+ if state is null then
+ if n is not null then
+ new_state := n;
+ return new_state;
+ end if;
+ return null;
+ elsif n is not null then
+ state := state + n;
+ return state;
+ end if;
+
+ return null;
+ end
+ $$ language plpgsql;
+
+ create function halfsum_finalfn(state int4) returns int4 as
+ $$
+ begin
+ if state is null then
+ return NULL;
+ else
+ return state / 2;
+ end if;
+ end
+ $$ language plpgsql;
+
+ create aggregate my_sum(int4)
+ (
+ stype = int4,
+ sfunc = sum_transfn
+ );
+
+ create aggregate my_half_sum(int4)
+ (
+ stype = int4,
+ sfunc = sum_transfn,
+ finalfunc = halfsum_finalfn
+ );
+
+ -- Agg state should be shared even though my_sum has no finalfn
+ select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one);
+
+ rollback;
create table nv_child_2011 () inherits (nv_parent);
alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid;
alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid;
-explain (costs off) select * from nv_parent where d between '2011-08-01' and '2011-08-31';
+explain (costs off, nodes off) select * from nv_parent where d between '2011-08-01' and '2011-08-31';
create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent);
-explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date;
-explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
+explain (costs off, nodes off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date;
+explain (costs off, nodes off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
-- after validation, the constraint should be used
alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check;
-explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
+explain (costs off, nodes off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
+ -- add an inherited NOT VALID constraint
+ alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid;
+ \d nv_child_2009
+ -- we leave nv_parent and children around to help test pg_dump logic
-- Foreign key adding test with mixed types
drop table child;
drop table parent;
+ -- check error cases for inheritance column merging
+ create table parent (a float8, b numeric(10,4), c text collate "C");
+
+ create table child (a float4) inherits (parent); -- fail
+ create table child (b decimal(10,7)) inherits (parent); -- fail
+ create table child (c text collate "POSIX") inherits (parent); -- fail
+ create table child (a double precision, b decimal(10,4)) inherits (parent);
+
+ drop table child;
+ drop table parent;
+
-- test copy in/out
-create table test (a int4, b int4, c int4);
+create table test (a int4, b int4, c int4) distribute by roundrobin;
insert into test values (1,2,3);
alter table test drop a;
copy test to stdout;
b[1:1][1:2][1:2],
c[1:2],
d[1:1][2:2]
- FROM arrtest;
+ FROM arrtest
+ ORDER BY a, b, c;
+ SELECT b[1:1][2][2],
+ d[1:1][2]
+ FROM arrtest;
+
INSERT INTO arrtest(a) VALUES('{1,null,3}');
-SELECT a FROM arrtest;
+SELECT a FROM arrtest ORDER BY 1;
UPDATE arrtest SET a[4] = NULL WHERE a[2] IS NULL;
-SELECT a FROM arrtest WHERE a[2] IS NULL;
+SELECT a FROM arrtest WHERE a[2] IS NULL ORDER BY 1;
DELETE FROM arrtest WHERE a[2] IS NULL AND b IS NULL;
-SELECT a,b,c FROM arrtest;
+SELECT a,b,c FROM arrtest ORDER BY a, b, c;
+ -- test mixed slice/scalar subscripting
+ select '{{1,2,3},{4,5,6},{7,8,9}}'::int[];
+ select ('{{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
+ select '[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[];
+ select ('[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
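+ -- (Illustration: once any subscript is written as a slice, a bare scalar n
+ -- in another dimension is read as the slice 1:n, so [1:2][2] above is
+ -- equivalent to:
+ --   select ('{{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][1:2]; )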
+
+ -- test slices with empty lower and/or upper index
+ CREATE TEMP TABLE arrtest_s (
+ a int2[],
+ b int2[][]
+ );
+ INSERT INTO arrtest_s VALUES ('{1,2,3,4,5}', '{{1,2,3}, {4,5,6}, {7,8,9}}');
+ INSERT INTO arrtest_s VALUES ('[0:4]={1,2,3,4,5}', '[0:2][0:2]={{1,2,3}, {4,5,6}, {7,8,9}}');
+
+ SELECT * FROM arrtest_s;
+ SELECT a[:3], b[:2][:2] FROM arrtest_s;
+ SELECT a[2:], b[2:][2:] FROM arrtest_s;
+ SELECT a[:], b[:] FROM arrtest_s;
+
+ -- updates
+ UPDATE arrtest_s SET a[:3] = '{11, 12, 13}', b[:2][:2] = '{{11,12}, {14,15}}'
+ WHERE array_lower(a,1) = 1;
+ SELECT * FROM arrtest_s;
+ UPDATE arrtest_s SET a[3:] = '{23, 24, 25}', b[2:][2:] = '{{25,26}, {28,29}}';
+ SELECT * FROM arrtest_s;
+ UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}';
+ SELECT * FROM arrtest_s;
+ UPDATE arrtest_s SET a[:] = '{23, 24, 25}'; -- fail, too small
+ INSERT INTO arrtest_s VALUES(NULL, NULL);
+ UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}'; -- fail, no good with null
+
+ -- check with fixed-length-array type, such as point
+ SELECT f1[0:1] FROM POINT_TBL;
+ SELECT f1[0:] FROM POINT_TBL;
+ SELECT f1[:1] FROM POINT_TBL;
+ SELECT f1[:] FROM POINT_TBL;
+
--
-- test array extension
--
-- wholly-contained
SELECT '' AS one, b1.*, b2.*
- FROM BOX_TBL b1, BOX_TBL b2
- WHERE b1.f1 @> b2.f1 and not b1.f1 ~= b2.f1;
+ FROM BOX_TBL b1, BOX_TBL b2
+ WHERE b1.f1 @> b2.f1 and not b1.f1 ~= b2.f1
+ ORDER BY (b1.f1[0])[0], (b1.f1[0])[1], (b1.f1[2])[0], (b1.f1[2])[1], (b2.f1[0])[0], (b2.f1[0])[1], (b2.f1[2])[0], (b2.f1[2])[1];
-SELECT '' AS four, height(f1), width(f1) FROM BOX_TBL;
+SELECT '' AS four, height(f1), width(f1) FROM BOX_TBL ORDER BY (f1[0])[0], (f1[0])[1], (f1[2])[0], (f1[2])[1];
+
+ --
+ -- Test the SP-GiST index
+ --
+
+ CREATE TEMPORARY TABLE box_temp (f1 box);
+
+ INSERT INTO box_temp
+ SELECT box(point(i, i), point(i * 2, i * 2))
+ FROM generate_series(1, 50) AS i;
+
+ CREATE INDEX box_spgist ON box_temp USING spgist (f1);
+
+ INSERT INTO box_temp
+ VALUES (NULL),
+ ('(0,0)(0,100)'),
+ ('(-3,4.3333333333)(40,1)'),
+ ('(0,100)(0,infinity)'),
+ ('(-infinity,0)(0,infinity)'),
+ ('(-infinity,-infinity)(infinity,infinity)');
+
+ SET enable_seqscan = false;
+
+ SELECT * FROM box_temp WHERE f1 << '(10,20),(30,40)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 << '(10,20),(30,40)';
+
+ SELECT * FROM box_temp WHERE f1 &< '(10,4.333334),(5,100)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 &< '(10,4.333334),(5,100)';
+
+ SELECT * FROM box_temp WHERE f1 && '(15,20),(25,30)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 && '(15,20),(25,30)';
+
+ SELECT * FROM box_temp WHERE f1 &> '(40,30),(45,50)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 &> '(40,30),(45,50)';
+
+ SELECT * FROM box_temp WHERE f1 >> '(30,40),(40,30)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 >> '(30,40),(40,30)';
+
+ SELECT * FROM box_temp WHERE f1 <<| '(10,4.33334),(5,100)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 <<| '(10,4.33334),(5,100)';
+
+ SELECT * FROM box_temp WHERE f1 &<| '(10,4.3333334),(5,1)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 &<| '(10,4.3333334),(5,1)';
+
+ SELECT * FROM box_temp WHERE f1 |&> '(49.99,49.99),(49.99,49.99)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 |&> '(49.99,49.99),(49.99,49.99)';
+
+ SELECT * FROM box_temp WHERE f1 |>> '(37,38),(39,40)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 |>> '(37,38),(39,40)';
+
+ SELECT * FROM box_temp WHERE f1 @> '(10,11),(15,16)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 @> '(10,11),(15,16)';
+
+ SELECT * FROM box_temp WHERE f1 <@ '(10,15),(30,35)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 <@ '(10,15),(30,35)';
+
+ SELECT * FROM box_temp WHERE f1 ~= '(20,20),(40,40)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 ~= '(20,20),(40,40)';
+
+ RESET enable_seqscan;
+
+ DROP INDEX box_spgist;
FROM CASE2_TBL b
WHERE j = -CASE_TBL.i;
-SELECT * FROM CASE_TBL;
+SELECT * FROM CASE_TBL ORDER BY i, f;
+ --
+ -- Nested CASE expressions
+ --
+
+ -- This test exercises a bug caused by aliasing econtext->caseValue_isNull
+ -- with the isNull argument of the inner CASE's ExecEvalCase() call. After
+ -- evaluating the vol(null) expression in the inner CASE's second WHEN-clause,
+ -- the isNull flag for the case test value incorrectly became true, causing
+ -- the third WHEN-clause not to match. The volatile function calls are needed
+ -- to prevent constant-folding in the planner, which would hide the bug.
+
+ CREATE FUNCTION vol(text) returns text as
+ 'begin return $1; end' language plpgsql volatile;
+
+ SELECT CASE
+ (CASE vol('bar')
+ WHEN 'foo' THEN 'it was foo!'
+ WHEN vol(null) THEN 'null input'
+ WHEN 'bar' THEN 'it was bar!' END
+ )
+ WHEN 'it was foo!' THEN 'foo recognized'
+ WHEN 'it was bar!' THEN 'bar recognized'
+ ELSE 'unrecognized' END;
+
+ -- In this case, we can't inline the SQL function without confusing things.
+ CREATE DOMAIN foodomain AS text;
+
+ CREATE FUNCTION volfoo(text) returns foodomain as
+ 'begin return $1::foodomain; end' language plpgsql volatile;
+
+ CREATE FUNCTION inline_eq(foodomain, foodomain) returns boolean as
+ 'SELECT CASE $2::text WHEN $1::text THEN true ELSE false END' language sql;
+
+ CREATE OPERATOR = (procedure = inline_eq,
+ leftarg = foodomain, rightarg = foodomain);
+
+ SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' END;
+
--
-- Clean up
--
create temp table clstr_temp (col1 int primary key, col2 text);
insert into clstr_temp values (2, 'two'), (1, 'one');
cluster clstr_temp using clstr_temp_pkey;
-select * from clstr_temp;
+select * from clstr_temp order by 1;
drop table clstr_temp;
+ RESET SESSION AUTHORIZATION;
+
+ -- Test CLUSTER with external tuplesorting
+
+ create table clstr_4 as select * from tenk1;
+ create index cluster_sort on clstr_4 (hundred, thousand, tenthous);
+ -- ensure we don't use the index in CLUSTER or in the checking SELECTs
+ set enable_indexscan = off;
+
+ -- Use external sort that only ever uses quicksort to sort runs:
+ set maintenance_work_mem = '1MB';
+ set replacement_sort_tuples = 0;
+ cluster clstr_4 using cluster_sort;
+ select * from
+ (select hundred, lag(hundred) over () as lhundred,
+ thousand, lag(thousand) over () as lthousand,
+ tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss
+ where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous);
+
+ -- Replacement selection will now be forced. It should only produce a single
+ -- run, because the input is found to be presorted:
+ set replacement_sort_tuples = 150000;
+ cluster clstr_4 using cluster_sort;
+ select * from
+ (select hundred, lag(hundred) over () as lhundred,
+ thousand, lag(thousand) over () as lthousand,
+ tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss
+ where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous);
+
+ reset enable_indexscan;
+ reset maintenance_work_mem;
+ reset replacement_sort_tuples;
+
-- clean up
- \c -
DROP TABLE clustertest;
DROP TABLE clstr_1;
DROP TABLE clstr_2;
INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
-SELECT '' AS five, * FROM FLOAT8_TBL;
+SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1;
+
+ -- test exact cases for trigonometric functions in degrees
+ SET extra_float_digits = 3;
+
+ SELECT x,
+ sind(x),
+ sind(x) IN (-1,-0.5,0,0.5,1) AS sind_exact
+ FROM (VALUES (0), (30), (90), (150), (180),
+ (210), (270), (330), (360)) AS t(x);
+
+ SELECT x,
+ cosd(x),
+ cosd(x) IN (-1,-0.5,0,0.5,1) AS cosd_exact
+ FROM (VALUES (0), (60), (90), (120), (180),
+ (240), (270), (300), (360)) AS t(x);
+
+ SELECT x,
+ tand(x),
+ tand(x) IN ('-Infinity'::float8,-1,0,
+ 1,'Infinity'::float8) AS tand_exact,
+ cotd(x),
+ cotd(x) IN ('-Infinity'::float8,-1,0,
+ 1,'Infinity'::float8) AS cotd_exact
+ FROM (VALUES (0), (45), (90), (135), (180),
+ (225), (270), (315), (360)) AS t(x);
+
+ SELECT x,
+ asind(x),
+ asind(x) IN (-90,-30,0,30,90) AS asind_exact,
+ acosd(x),
+ acosd(x) IN (0,60,90,120,180) AS acosd_exact
+ FROM (VALUES (-1), (-0.5), (0), (0.5), (1)) AS t(x);
+
+ SELECT x,
+ atand(x),
+ atand(x) IN (-90,-45,0,45,90) AS atand_exact
+ FROM (VALUES ('-Infinity'::float8), (-1), (0), (1),
+ ('Infinity'::float8)) AS t(x);
+
+ SELECT x, y,
+ atan2d(y, x),
+ atan2d(y, x) IN (-90,0,90,180) AS atan2d_exact
+ FROM (SELECT 10*cosd(a), 10*sind(a)
+ FROM generate_series(0, 360, 90) AS t(a)) AS t(x,y);
+
+ RESET extra_float_digits;
--
insert into inserttest values(30, 50, repeat('x', 10000));
-select col1, col2, char_length(col3) from inserttest;
+select col1, col2, char_length(col3) from inserttest order by 1,2,3;
drop table inserttest;
+
+ --
+ -- check indirection (field/array assignment), cf bug #14265
+ --
+ -- these tests are aware that transformInsertStmt has 3 separate code paths
+ --
+
+ create type insert_test_type as (if1 int, if2 text[]);
+
+ create table inserttest (f1 int, f2 int[],
+ f3 insert_test_type, f4 insert_test_type[]);
+
+ insert into inserttest (f2[1], f2[2]) values (1,2);
+ insert into inserttest (f2[1], f2[2]) values (3,4), (5,6);
+ insert into inserttest (f2[1], f2[2]) select 7,8;
+ insert into inserttest (f2[1], f2[2]) values (1,default); -- not supported
+
+ insert into inserttest (f3.if1, f3.if2) values (1,array['foo']);
+ insert into inserttest (f3.if1, f3.if2) values (1,'{foo}'), (2,'{bar}');
+ insert into inserttest (f3.if1, f3.if2) select 3, '{baz,quux}';
+ insert into inserttest (f3.if1, f3.if2) values (1,default); -- not supported
+
+ insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar');
+ insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar'), ('baz', 'quux');
+ insert into inserttest (f3.if2[1], f3.if2[2]) select 'bear', 'beer';
+
+ insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar');
+ insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar'), ('baz', 'quux');
+ insert into inserttest (f4[1].if2[1], f4[1].if2[2]) select 'bear', 'beer';
+
+ select * from inserttest;
+
+ -- also check reverse-listing
+ create table inserttest2 (f1 bigint, f2 text);
+ create rule irule1 as on insert to inserttest2 do also
+ insert into inserttest (f3.if2[1], f3.if2[2])
+ values (new.f1,new.f2);
+ create rule irule2 as on insert to inserttest2 do also
+ insert into inserttest (f4[1].if1, f4[1].if2[2])
+ values (1,'fool'),(new.f1,new.f2);
+ create rule irule3 as on insert to inserttest2 do also
+ insert into inserttest (f4[1].if1, f4[1].if2[2])
+ select new.f1, new.f2;
+ \d+ inserttest2
+
+ drop table inserttest2;
+ drop table inserttest;
+ drop type insert_test_type;
-- create various views
EXPLAIN (costs off)
- CREATE MATERIALIZED VIEW tvm AS SELECT * FROM tv ORDER BY type;
- CREATE MATERIALIZED VIEW tvm AS SELECT * FROM tv ORDER BY type;
- SELECT * FROM tvm;
- CREATE MATERIALIZED VIEW tmm AS SELECT sum(totamt) AS grandtot FROM tm;
- CREATE MATERIALIZED VIEW tvmm AS SELECT sum(totamt) AS grandtot FROM tvm;
- CREATE UNIQUE INDEX tvmm_expr ON tvmm ((grandtot > 0));
- CREATE UNIQUE INDEX tvmm_pred ON tvmm (grandtot) WHERE grandtot < 0;
- CREATE VIEW tvv AS SELECT sum(totamt) AS grandtot FROM tv;
+ CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type;
+ CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type;
+ SELECT * FROM mvtest_tvm;
+ CREATE MATERIALIZED VIEW mvtest_tmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tm;
+ CREATE MATERIALIZED VIEW mvtest_tvmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tvm;
+ CREATE UNIQUE INDEX mvtest_tvmm_expr ON mvtest_tvmm ((grandtot > 0));
+ CREATE UNIQUE INDEX mvtest_tvmm_pred ON mvtest_tvmm (grandtot) WHERE grandtot < 0;
+ CREATE VIEW mvtest_tvv AS SELECT sum(totamt) AS grandtot FROM mvtest_tv;
EXPLAIN (costs off)
- CREATE MATERIALIZED VIEW tvvm AS SELECT * FROM tvv;
- CREATE MATERIALIZED VIEW tvvm AS SELECT * FROM tvv;
- CREATE VIEW tvvmv AS SELECT * FROM tvvm;
- CREATE MATERIALIZED VIEW bb AS SELECT * FROM tvvmv;
- CREATE INDEX aa ON bb (grandtot);
+ CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv;
+ CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv;
+ CREATE VIEW mvtest_tvvmv AS SELECT * FROM mvtest_tvvm;
+ CREATE MATERIALIZED VIEW mvtest_bb AS SELECT * FROM mvtest_tvvmv;
+ CREATE INDEX mvtest_aa ON mvtest_bb (grandtot);
-- check that plans seem reasonable
- \d+ tvm
- \d+ tvm
- \d+ tvvm
- \d+ bb
+ \d+ mvtest_tvm
+ \d+ mvtest_tvm
+ \d+ mvtest_tvvm
+ \d+ mvtest_bb
-- test schema behavior
- CREATE SCHEMA mvschema;
- ALTER MATERIALIZED VIEW tvm SET SCHEMA mvschema;
- \d+ tvm
- \d+ tvmm
- SET search_path = mvschema, public;
- \d+ tvm
+ CREATE SCHEMA mvtest_mvschema;
+ ALTER MATERIALIZED VIEW mvtest_tvm SET SCHEMA mvtest_mvschema;
+ \d+ mvtest_tvm
+ \d+ mvtest_tvmm
+ SET search_path = mvtest_mvschema, public;
+ \d+ mvtest_tvm
-- modify the underlying table data
- INSERT INTO t VALUES (6, 'z', 13);
+ INSERT INTO mvtest_t VALUES (6, 'z', 13);
-- confirm pre- and post-refresh contents of fairly simple materialized views
- SELECT * FROM tm ORDER BY type;
- SELECT * FROM tvm ORDER BY type;
- REFRESH MATERIALIZED VIEW tvm;
- SELECT * FROM tm ORDER BY type;
- SELECT * FROM tvm ORDER BY type;
+ SELECT * FROM mvtest_tm ORDER BY type;
+ SELECT * FROM mvtest_tvm ORDER BY type;
-REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tm;
+ REFRESH MATERIALIZED VIEW mvtest_tvm;
+ SELECT * FROM mvtest_tm ORDER BY type;
+ SELECT * FROM mvtest_tvm ORDER BY type;
RESET search_path;
-- confirm pre- and post-refresh contents of nested materialized views
CREATE MATERIALIZED VIEW mv_test3 AS SELECT * FROM mv_test2 WHERE moo = 12345;
SELECT relispopulated FROM pg_class WHERE oid = 'mv_test3'::regclass;
- DROP VIEW v_test1 CASCADE;
+ DROP VIEW mvtest_vt1 CASCADE;
-- test that vacuum does not make empty matview look unpopulated
- CREATE TABLE hoge (i int);
- INSERT INTO hoge VALUES (generate_series(1,100000));
- CREATE MATERIALIZED VIEW hogeview AS SELECT * FROM hoge WHERE i % 2 = 0;
- CREATE INDEX hogeviewidx ON hogeview (i);
- DELETE FROM hoge;
- REFRESH MATERIALIZED VIEW hogeview;
- SELECT * FROM hogeview WHERE i < 10;
- VACUUM ANALYZE hogeview;
- SELECT * FROM hogeview WHERE i < 10;
- DROP TABLE hoge CASCADE;
+ CREATE TABLE mvtest_huge (i int);
+ INSERT INTO mvtest_huge VALUES (generate_series(1,100000));
+ CREATE MATERIALIZED VIEW mvtest_hugeview AS SELECT * FROM mvtest_huge WHERE i % 2 = 0;
+ CREATE INDEX mvtest_hugeviewidx ON mvtest_hugeview (i);
+ DELETE FROM mvtest_huge;
+ REFRESH MATERIALIZED VIEW mvtest_hugeview;
+ SELECT * FROM mvtest_hugeview WHERE i < 10;
+ VACUUM ANALYZE mvtest_hugeview;
+ SELECT * FROM mvtest_hugeview WHERE i < 10;
+ DROP TABLE mvtest_huge CASCADE;
-- test that duplicate values on unique index prevent refresh
- CREATE TABLE foo(a, b) AS VALUES(1, 10);
- CREATE MATERIALIZED VIEW mv AS SELECT * FROM foo;
- CREATE UNIQUE INDEX ON mv(a);
- INSERT INTO foo SELECT * FROM foo;
- REFRESH MATERIALIZED VIEW mv;
- DROP TABLE foo CASCADE;
+ CREATE TABLE mvtest_foo(a, b) AS VALUES(1, 10);
+ CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo;
+ CREATE UNIQUE INDEX ON mvtest_mv(a);
+ INSERT INTO mvtest_foo SELECT * FROM mvtest_foo;
+ REFRESH MATERIALIZED VIEW mvtest_mv;
+ REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv;
+ DROP TABLE mvtest_foo CASCADE;
-- make sure that all columns covered by unique indexes work
- CREATE TABLE foo(a, b, c) AS VALUES(1, 2, 3);
- CREATE MATERIALIZED VIEW mv AS SELECT * FROM foo;
- CREATE UNIQUE INDEX ON mv (a);
- CREATE UNIQUE INDEX ON mv (b);
- CREATE UNIQUE INDEX on mv (c);
- INSERT INTO foo VALUES(2, 3, 4);
- INSERT INTO foo VALUES(3, 4, 5);
- REFRESH MATERIALIZED VIEW mv;
- DROP TABLE foo CASCADE;
+ CREATE TABLE mvtest_foo(a, b, c) AS VALUES(1, 2, 3);
+ CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo;
+ CREATE UNIQUE INDEX ON mvtest_mv (a);
+ CREATE UNIQUE INDEX ON mvtest_mv (b);
+ CREATE UNIQUE INDEX on mvtest_mv (c);
+ INSERT INTO mvtest_foo VALUES(2, 3, 4);
+ INSERT INTO mvtest_foo VALUES(3, 4, 5);
+ REFRESH MATERIALIZED VIEW mvtest_mv;
-REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv;
+ DROP TABLE mvtest_foo CASCADE;
-- allow subquery to reference unpopulated matview if WITH NO DATA is specified
- CREATE MATERIALIZED VIEW mv1 AS SELECT 1 AS col1 WITH NO DATA;
- CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM mv1
- WHERE col1 = (SELECT LEAST(col1) FROM mv1) WITH NO DATA;
- DROP MATERIALIZED VIEW mv1 CASCADE;
+ CREATE MATERIALIZED VIEW mvtest_mv1 AS SELECT 1 AS col1 WITH NO DATA;
+ CREATE MATERIALIZED VIEW mvtest_mv2 AS SELECT * FROM mvtest_mv1
+ WHERE col1 = (SELECT LEAST(col1) FROM mvtest_mv1) WITH NO DATA;
+ DROP MATERIALIZED VIEW mvtest_mv1 CASCADE;
-- make sure that types with unusual equality tests work
- CREATE TABLE boxes (id serial primary key, b box);
- INSERT INTO boxes (b) VALUES
+ CREATE TABLE mvtest_boxes (id serial primary key, b box);
+ INSERT INTO mvtest_boxes (b) VALUES
('(32,32),(31,31)'),
('(2.0000004,2.0000004),(1,1)'),
('(1.9999996,1.9999996),(1,1)');
- CREATE MATERIALIZED VIEW boxmv AS SELECT * FROM boxes;
- CREATE UNIQUE INDEX boxmv_id ON boxmv (id);
- UPDATE boxes SET b = '(2,2),(1,1)' WHERE id = 2;
- SELECT * FROM boxmv ORDER BY id;
- DROP TABLE boxes CASCADE;
+ CREATE MATERIALIZED VIEW mvtest_boxmv AS SELECT * FROM mvtest_boxes;
+ CREATE UNIQUE INDEX mvtest_boxmv_id ON mvtest_boxmv (id);
+ UPDATE mvtest_boxes SET b = '(2,2),(1,1)' WHERE id = 2;
-REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_boxmv;
+ SELECT * FROM mvtest_boxmv ORDER BY id;
+ DROP TABLE mvtest_boxes CASCADE;
-- make sure that column names are handled correctly
- CREATE TABLE v (i int, j int);
- CREATE MATERIALIZED VIEW mv_v (ii) AS SELECT i, j AS jj FROM v;
- ALTER TABLE v RENAME COLUMN i TO x;
- INSERT INTO v values (1, 2);
- CREATE UNIQUE INDEX mv_v_ii ON mv_v (ii);
- REFRESH MATERIALIZED VIEW mv_v;
- UPDATE v SET j = 3 WHERE x = 1;
- SELECT * FROM v;
- SELECT * FROM mv_v;
- DROP TABLE v CASCADE;
+ CREATE TABLE mvtest_v (i int, j int);
+ CREATE MATERIALIZED VIEW mvtest_mv_v (ii, jj, kk) AS SELECT i, j FROM mvtest_v; -- error
+ CREATE MATERIALIZED VIEW mvtest_mv_v (ii, jj) AS SELECT i, j FROM mvtest_v; -- ok
+ CREATE MATERIALIZED VIEW mvtest_mv_v_2 (ii) AS SELECT i, j FROM mvtest_v; -- ok
+ CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj, kk) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- error
+ CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok
+ CREATE MATERIALIZED VIEW mvtest_mv_v_4 (ii) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok
+ ALTER TABLE mvtest_v RENAME COLUMN i TO x;
+ INSERT INTO mvtest_v values (1, 2);
+ CREATE UNIQUE INDEX mvtest_mv_v_ii ON mvtest_mv_v (ii);
+ REFRESH MATERIALIZED VIEW mvtest_mv_v;
+ UPDATE mvtest_v SET j = 3 WHERE x = 1;
-REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_v;
+ REFRESH MATERIALIZED VIEW mvtest_mv_v_2;
+ REFRESH MATERIALIZED VIEW mvtest_mv_v_3;
+ REFRESH MATERIALIZED VIEW mvtest_mv_v_4;
+ SELECT * FROM mvtest_v;
+ SELECT * FROM mvtest_mv_v;
+ SELECT * FROM mvtest_mv_v_2;
+ SELECT * FROM mvtest_mv_v_3;
+ SELECT * FROM mvtest_mv_v_4;
+ DROP TABLE mvtest_v CASCADE;
+
+ -- make sure that create WITH NO DATA does not plan the query (bug #13907)
+ create materialized view mvtest_error as select 1/0 as x; -- fail
+ create materialized view mvtest_error as select 1/0 as x with no data;
+ refresh materialized view mvtest_error; -- fail here
+ drop materialized view mvtest_error;
-- make sure that matview rows can be referenced as source rows (bug #9398)
- CREATE TABLE v AS SELECT generate_series(1,10) AS a;
- CREATE MATERIALIZED VIEW mv_v AS SELECT a FROM v WHERE a <= 5;
- DELETE FROM v WHERE EXISTS ( SELECT * FROM mv_v WHERE mv_v.a = v.a );
- SELECT * FROM v;
- SELECT * FROM mv_v;
- DROP TABLE v CASCADE;
+ CREATE TABLE mvtest_v AS SELECT generate_series(1,10) AS a;
+ CREATE MATERIALIZED VIEW mvtest_mv_v AS SELECT a FROM mvtest_v WHERE a <= 5;
+ DELETE FROM mvtest_v WHERE EXISTS ( SELECT * FROM mvtest_mv_v WHERE mvtest_mv_v.a = mvtest_v.a );
+ SELECT * FROM mvtest_v;
+ SELECT * FROM mvtest_mv_v;
+ DROP TABLE mvtest_v CASCADE;
-- make sure running as superuser works when MV owned by another role (bug #11208)
- CREATE ROLE user_dw;
- SET ROLE user_dw;
- CREATE TABLE foo_data AS SELECT i, md5(random()::text)
+ CREATE ROLE regress_user_mvtest;
+ SET ROLE regress_user_mvtest;
+ CREATE TABLE mvtest_foo_data AS SELECT i, md5(random()::text)
FROM generate_series(1, 10) i;
- CREATE MATERIALIZED VIEW mv_foo AS SELECT * FROM foo_data;
- CREATE MATERIALIZED VIEW mv_foo AS SELECT * FROM foo_data;
- CREATE MATERIALIZED VIEW IF NOT EXISTS mv_foo AS SELECT * FROM foo_data;
- CREATE UNIQUE INDEX ON mv_foo (i);
+ CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ CREATE MATERIALIZED VIEW IF NOT EXISTS mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ CREATE UNIQUE INDEX ON mvtest_mv_foo (i);
RESET ROLE;
- REFRESH MATERIALIZED VIEW mv_foo;
- DROP OWNED BY user_dw CASCADE;
- DROP ROLE user_dw;
+ REFRESH MATERIALIZED VIEW mvtest_mv_foo;
-REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_foo;
+ DROP OWNED BY regress_user_mvtest CASCADE;
+ DROP ROLE regress_user_mvtest;
+
+ -- make sure that create WITH NO DATA works via SPI
+ BEGIN;
+ CREATE FUNCTION mvtest_func()
+ RETURNS void AS $$
+ BEGIN
+ CREATE MATERIALIZED VIEW mvtest1 AS SELECT 1 AS x;
+ CREATE MATERIALIZED VIEW mvtest2 AS SELECT 1 AS x WITH NO DATA;
+ END;
+ $$ LANGUAGE plpgsql;
+ SELECT mvtest_func();
+ SELECT * FROM mvtest1;
+ SELECT * FROM mvtest2;
+ ROLLBACK;
SELECT '' AS one, c.f1 FROM NAME_TBL c WHERE c.f1 = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
-SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 < '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 < '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR' ORDER BY f1;
-SELECT '' AS four, c.f1 FROM NAME_TBL c WHERE c.f1 <= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT '' AS four, c.f1 FROM NAME_TBL c WHERE c.f1 <= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR' ORDER BY f1;
-SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 > '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 > '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR' ORDER BY f1;
-SELECT '' AS four, c.f1 FROM NAME_TBL c WHERE c.f1 >= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT '' AS four, c.f1 FROM NAME_TBL c WHERE c.f1 >= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR' ORDER BY f1;
-SELECT '' AS seven, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*';
+SELECT '' AS seven, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*' ORDER BY f1;
-SELECT '' AS zero, c.f1 FROM NAME_TBL c WHERE c.f1 !~ '.*';
+SELECT '' AS zero, c.f1 FROM NAME_TBL c WHERE c.f1 !~ '.*' ORDER BY f1;
-SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '[0-9]';
+SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '[0-9]' ORDER BY f1;
-SELECT '' AS two, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*asdf.*';
+SELECT '' AS two, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*asdf.*' ORDER BY f1;
DROP TABLE NAME_TBL;
+
+ DO $$
+ DECLARE r text[];
+ BEGIN
+ r := parse_ident('Schemax.Tabley');
+ RAISE NOTICE '%', format('%I.%I', r[1], r[2]);
+ r := parse_ident('"SchemaX"."TableY"');
+ RAISE NOTICE '%', format('%I.%I', r[1], r[2]);
+ END;
+ $$;
+
+ SELECT parse_ident('foo.boo');
+ SELECT parse_ident('foo.boo[]'); -- should fail
+ SELECT parse_ident('foo.boo[]', strict => false); -- ok
+
+ -- should fail
+ SELECT parse_ident(' ');
+ SELECT parse_ident(' .aaa');
+ SELECT parse_ident(' aaa . ');
+ SELECT parse_ident('aaa.a%b');
+ SELECT parse_ident(E'X\rXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX');
+
+ SELECT length(a[1]), length(a[2]) from parse_ident('"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx".yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy') as a ;
+
+ SELECT parse_ident(' first . " second " ." third ". " ' || repeat('x',66) || '"');
+ SELECT parse_ident(' first . " second " ." third ". " ' || repeat('x',66) || '"')::name[];
+
+ SELECT parse_ident(E'"c".X XXXX\002XXXXXX');
+ SELECT parse_ident('1020');
+ SELECT parse_ident('10.20');
+ SELECT parse_ident('.');
+ SELECT parse_ident('.1020');
+ SELECT parse_ident('xxx.1020');
INSERT INTO ceil_floor_round VALUES ('0.0');
INSERT INTO ceil_floor_round VALUES ('0.0000001');
INSERT INTO ceil_floor_round VALUES ('-0.000001');
-SELECT a, ceil(a), ceiling(a), floor(a), round(a) FROM ceil_floor_round;
+SELECT a, ceil(a), ceiling(a), floor(a), round(a) FROM ceil_floor_round ORDER BY a;
DROP TABLE ceil_floor_round;
+ -- Check rounding; it should round ties away from zero.
+ SELECT i as pow,
+ round((-2.5 * 10 ^ i)::numeric, -i),
+ round((-1.5 * 10 ^ i)::numeric, -i),
+ round((-0.5 * 10 ^ i)::numeric, -i),
+ round((0.5 * 10 ^ i)::numeric, -i),
+ round((1.5 * 10 ^ i)::numeric, -i),
+ round((2.5 * 10 ^ i)::numeric, -i)
+ FROM generate_series(-5,5) AS t(i);
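+ -- (Illustration: numeric rounds ties away from zero, e.g. round(2.5) = 3
+ -- and round(-2.5) = -3, whereas float8 typically rounds ties to even.)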
+
-- Testing for width_bucket(). For convenience, we test both the
-- numeric and float8 versions of the function in this file.
INSERT INTO num_input_test(n1) VALUES ('');
INSERT INTO num_input_test(n1) VALUES (' N aN ');
-SELECT * FROM num_input_test;
+SELECT * FROM num_input_test ORDER BY n1;
+ --
+ -- Test some corner cases for multiplication
+ --
+
+ select 4790999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+
+ select 4789999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+
+ select 4770999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+
+ select 4769999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+
--
-- Test some corner cases for division
--
drop function arrayassign1();
drop function testoa(x1 int, x2 int, x3 int);
+-- Check that DMLs in a plpgsql function work OK when subsequent queries need
+-- to open new datanode connections
+CREATE OR REPLACE FUNCTION TestJoinTempTable_CT()
+RETURNS void AS $$
+BEGIN
+ CREATE TABLE IF NOT EXISTS RealTable(ProductId int, ScenarioId int);
+ TRUNCATE TABLE RealTable;
+
+ CREATE TABLE IF NOT EXISTS TmpBar(NodeId int)
+ DISTRIBUTE BY REPLICATION;
+ CREATE TABLE IF NOT EXISTS TmpFoo(TempId int)
+ DISTRIBUTE BY REPLICATION;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION TestJoinTempTable_INSERT()
+RETURNS void AS $$
+BEGIN
+ INSERT INTO RealTable(ProductId, ScenarioId)
+ SELECT generate_series(1,1000) as ProductId, (random() * 100)::int as ScenarioId;
+
+ INSERT INTO TmpBar(NodeId)
+ SELECT generate_series(1,1000);
+ RAISE INFO 'number of existing rows in RealTable - %', (SELECT count(*) FROM RealTable);
+ RAISE INFO 'number of existing rows in TmpBar - %', (SELECT count(*) FROM TmpBar);
+ RAISE INFO 'number of existing rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
+ INSERT INTO TmpFoo(TempId)
+ SELECT DISTINCT(PR.ProductId)
+ FROM RealTable AS PR
+ JOIN TmpBar tmp1 ON PR.ProductId = tmp1.NodeId;
+
+ RAISE INFO 'number of rows produced by query - %',
+ (SELECT COUNT(DISTINCT(PR.ProductId))
+ FROM RealTable AS PR
+ JOIN TmpBar tmp1 ON PR.ProductId = tmp1.NodeId);
+ RAISE INFO 'number of rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
+ RAISE INFO 'number of existing rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
+ RAISE INFO 'number of existing rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
+END;
+$$ LANGUAGE plpgsql;
+
+SELECT TestJoinTempTable_CT();
+SELECT TestJoinTempTable_INSERT();
+
+DROP TABLE RealTable;
+DROP TABLE TmpBar;
+DROP TABLE TmpFoo;
+
+CREATE OR REPLACE FUNCTION TestJoinTempTable()
+RETURNS void AS $$
+BEGIN
+ CREATE TABLE IF NOT EXISTS RealTable(ProductId int, ScenarioId int);
+ TRUNCATE TABLE RealTable;
+
+ CREATE TEMPORARY TABLE IF NOT EXISTS TmpBar(NodeId int)
+ DISTRIBUTE BY REPLICATION;
+ CREATE TEMPORARY TABLE IF NOT EXISTS TmpFoo(TempId int)
+ DISTRIBUTE BY REPLICATION;
+
+ INSERT INTO RealTable(ProductId, ScenarioId)
+ SELECT generate_series(1,1000) as ProductId, (random() * 100)::int as ScenarioId;
+
+ INSERT INTO TmpBar(NodeId)
+ SELECT generate_series(1,1000);
+
+ INSERT INTO TmpFoo(TempId)
+ SELECT DISTINCT(PR.ProductId)
+ FROM RealTable AS PR
+ JOIN TmpBar tmp1 ON PR.ProductId = tmp1.NodeId;
+END;
+$$ LANGUAGE plpgsql;
+
+SELECT TestJoinTempTable();
+
+-- Multiple invocations of the function showed interesting issues with command
+-- passdown, so add that to the test case.
+SELECT TestJoinTempTable();
+SELECT TestJoinTempTable();
+
+DROP TABLE RealTable;
+DROP TABLE TmpBar;
+DROP TABLE TmpFoo;
- -- access to call stack
+ --
+ -- Test handling of expanded arrays
+ --
+
+ create function returns_rw_array(int) returns int[]
+ language plpgsql as $$
+ declare r int[];
+ begin r := array[$1, $1]; return r; end;
+ $$ stable;
+
+ create function consumes_rw_array(int[]) returns int
+ language plpgsql as $$
+ begin return $1[1]; end;
+ $$ stable;
+
+ -- bug #14174
+ explain (verbose, costs off)
+ select i, a from
+ (select returns_rw_array(1) as a offset 0) ss,
+ lateral consumes_rw_array(a) i;
+
+ select i, a from
+ (select returns_rw_array(1) as a offset 0) ss,
+ lateral consumes_rw_array(a) i;
+
+ explain (verbose, costs off)
+ select consumes_rw_array(a), a from returns_rw_array(1) a;
+
+ select consumes_rw_array(a), a from returns_rw_array(1) a;
+
+ explain (verbose, costs off)
+ select consumes_rw_array(a), a from
+ (values (returns_rw_array(1)), (returns_rw_array(2))) v(a);
+
+ select consumes_rw_array(a), a from
+ (values (returns_rw_array(1)), (returns_rw_array(2))) v(a);
+
+
+ --
+ -- Test access to call stack
+ --
+
create function inner_func(int)
returns int as $$
declare _context text;
SELECT * FROM pxtest2;
-- There should be two prepared transactions
-SELECT gid FROM pg_prepared_xacts;
+SELECT gid FROM pg_prepared_xacts ORDER BY gid;
+-- Check prepared transactions in the cluster
+SELECT pgxc_prepared_xact FROM pgxc_prepared_xacts ORDER by 1;
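+-- (Illustration: entries show up here after a session runs
+--   BEGIN; ...; PREPARE TRANSACTION 'regress-one';
+-- and vanish once it is resolved via COMMIT PREPARED or ROLLBACK PREPARED,
+-- as done at the end of this test.)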
-- pxtest3 should be locked because of the pending DROP
+ begin;
set statement_timeout to 2000;
SELECT * FROM pxtest3;
- reset statement_timeout;
+ rollback;
-- Disconnect, we will continue testing in a different backend
\c -
-- There should still be two prepared transactions
-SELECT gid FROM pg_prepared_xacts;
+SELECT gid FROM pg_prepared_xacts ORDER BY gid;
+-- Check prepared transactions in the cluster
+SELECT pgxc_prepared_xact FROM pgxc_prepared_xacts ORDER by 1;
-- pxtest3 should still be locked because of the pending DROP
+ begin;
set statement_timeout to 2000;
SELECT * FROM pxtest3;
- reset statement_timeout;
+ rollback;
-- Commit table creation
COMMIT PREPARED 'regress-one';
SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) );
- SET SESSION AUTHORIZATION regressuser3;
+ SET SESSION AUTHORIZATION regress_user3;
SELECT session_user, current_user;
-SELECT * FROM atest1; -- ok
+SELECT * FROM atest1 ORDER BY 1; -- ok
SELECT * FROM atest2; -- fail
INSERT INTO atest1 VALUES (2, 'two'); -- fail
INSERT INTO atest2 VALUES ('foo', true); -- fail
reset constraint_exclusion;
CREATE VIEW atestv4 AS SELECT * FROM atestv3; -- nested view
-SELECT * FROM atestv4; -- ok
+SELECT * FROM atestv4; -- fail due to issue 3520503, see above
- GRANT SELECT ON atestv4 TO regressuser2;
+ GRANT SELECT ON atestv4 TO regress_user2;
- SET SESSION AUTHORIZATION regressuser2;
+ SET SESSION AUTHORIZATION regress_user2;
-- Two complex cases:
SELECT * FROM atestv3; -- fail
- SELECT * FROM atestv4; -- ok (even though regressuser2 cannot access underlying atestv3)
+-- fail due to issue 3520503, see above
+ SELECT * FROM atestv4; -- ok (even though regress_user2 cannot access underlying atestv3)
SELECT * FROM atest2; -- ok
- SELECT * FROM atestv2; -- fail (even though regressuser2 can access underlying atest2)
+ SELECT * FROM atestv2; -- fail (even though regress_user2 can access underlying atest2)
-- Test column level permissions
SELECT 1 FROM atest5 WHERE two = 2; -- fail
SELECT * FROM atest1, atest5; -- fail
SELECT atest1.* FROM atest1, atest5; -- ok
-SELECT atest1.*,atest5.one FROM atest1, atest5; -- ok
+SELECT atest1.*,atest5.one FROM atest1, atest5; -- ok
SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.two); -- fail
-SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.one); -- ok
+SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.one); -- ok
SELECT one, two FROM atest5; -- fail
- SET SESSION AUTHORIZATION regressuser1;
- GRANT SELECT (one,two) ON atest6 TO regressuser4;
+ SET SESSION AUTHORIZATION regress_user1;
+ GRANT SELECT (one,two) ON atest6 TO regress_user4;
- SET SESSION AUTHORIZATION regressuser4;
+ SET SESSION AUTHORIZATION regress_user4;
SELECT one, two FROM atest5 NATURAL JOIN atest6; -- fail still
- SET SESSION AUTHORIZATION regressuser1;
- GRANT SELECT (two) ON atest5 TO regressuser4;
+ SET SESSION AUTHORIZATION regress_user1;
+ GRANT SELECT (two) ON atest5 TO regress_user4;
- SET SESSION AUTHORIZATION regressuser4;
- SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now
+ SET SESSION AUTHORIZATION regress_user4;
+ SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now
-- test column-level privileges for INSERT and UPDATE
-INSERT INTO atest5 (two) VALUES (3); -- ok
+INSERT INTO atest5 (two) VALUES (3); -- fail due to issue 3520503, see above
COPY atest5 FROM stdin; -- fail
COPY atest5 (two) FROM stdin; -- ok
1
SELECT atest6 FROM atest6; -- fail
SELECT one FROM atest5 NATURAL JOIN atest6; -- fail
- SET SESSION AUTHORIZATION regressuser1;
+ SET SESSION AUTHORIZATION regress_user1;
ALTER TABLE atest6 DROP COLUMN three;
- SET SESSION AUTHORIZATION regressuser4;
+ SET SESSION AUTHORIZATION regress_user4;
SELECT atest6 FROM atest6; -- ok
-SELECT one FROM atest5 NATURAL JOIN atest6; -- ok
+SELECT one FROM atest5 NATURAL JOIN atest6; -- ok
- SET SESSION AUTHORIZATION regressuser1;
+ SET SESSION AUTHORIZATION regress_user1;
ALTER TABLE atest6 DROP COLUMN two;
- REVOKE SELECT (one,blue) ON atest6 FROM regressuser4;
+ REVOKE SELECT (one,blue) ON atest6 FROM regress_user4;
- SET SESSION AUTHORIZATION regressuser4;
+ SET SESSION AUTHORIZATION regress_user4;
SELECT * FROM atest6; -- fail
SELECT 1 FROM atest6; -- fail
CREATE TABLE atestp1 (f1 int, f2 int) WITH OIDS;
CREATE TABLE atestp2 (fx int, fy int) WITH OIDS;
CREATE TABLE atestc (fz int) INHERITS (atestp1, atestp2);
- GRANT SELECT(fx,fy,oid) ON atestp2 TO regressuser2;
- GRANT SELECT(fx) ON atestc TO regressuser2;
+ GRANT SELECT(fx,fy,oid) ON atestp2 TO regress_user2;
+ GRANT SELECT(fx) ON atestc TO regress_user2;
- SET SESSION AUTHORIZATION regressuser2;
+ SET SESSION AUTHORIZATION regress_user2;
SELECT fx FROM atestp2; -- ok
-SELECT fy FROM atestp2; -- ok
-SELECT atestp2 FROM atestp2; -- ok
-SELECT oid FROM atestp2; -- ok
+SELECT fy FROM atestp2; -- fail due to issue 3520503, see above
+SELECT atestp2 FROM atestp2; -- fail due to issue 3520503, see above
+SELECT oid FROM atestp2; -- fail due to issue 3520503, see above
SELECT fy FROM atestc; -- fail
- SET SESSION AUTHORIZATION regressuser1;
- GRANT SELECT(fy,oid) ON atestc TO regressuser2;
+ SET SESSION AUTHORIZATION regress_user1;
+ GRANT SELECT(fy,oid) ON atestc TO regress_user2;
- SET SESSION AUTHORIZATION regressuser2;
+ SET SESSION AUTHORIZATION regress_user2;
SELECT fx FROM atestp2; -- still ok
SELECT fy FROM atestp2; -- ok
-SELECT atestp2 FROM atestp2; -- ok
+SELECT atestp2 FROM atestp2; -- fail due to issue 3520503, see above
SELECT oid FROM atestp2; -- ok
-- privileges on functions, languages
SELECT testfunc1(5), testfunc2(5); -- ok
CREATE FUNCTION testfunc3(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -- fail
- SET SESSION AUTHORIZATION regressuser3;
+ SET SESSION AUTHORIZATION regress_user3;
SELECT testfunc1(5); -- fail
SELECT col1 FROM atest2 WHERE col2 = true; -- fail
-SELECT testfunc4(true); -- ok
+SELECT testfunc4(true); -- fail due to issue 3520503, see above
- SET SESSION AUTHORIZATION regressuser4;
+ SET SESSION AUTHORIZATION regress_user4;
SELECT testfunc1(5); -- ok
DROP FUNCTION testfunc1(int); -- fail
);
GRANT SELECT ON uaccount TO public;
INSERT INTO uaccount VALUES
- ('rls_regress_user0', 99),
- ('rls_regress_user1', 1),
- ('rls_regress_user2', 2),
- ('rls_regress_user3', 3);
+ ('regress_rls_alice', 99),
+ ('regress_rls_bob', 1),
+ ('regress_rls_carol', 2),
+ ('regress_rls_dave', 3);
+-- PGXL
+-- Distribute by replication so that the "document" table below can reference
+-- the "cid" column.
+--
CREATE TABLE category (
cid int primary key,
cname text
INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 -0097');
INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC');
-SELECT '' AS "64", d1 FROM TIMESTAMP_TBL;
+SELECT '' AS "64", d1 FROM TIMESTAMP_TBL ORDER BY d1;
+ -- Check behavior at the lower boundary of the timestamp range
+ SELECT '4714-11-24 00:00:00 BC'::timestamp;
+ SELECT '4714-11-23 23:59:59 BC'::timestamp; -- out of range
+ -- The upper boundary differs between integer and float timestamps, so no check
+
-- Demonstrate functions and operators
SELECT '' AS "48", d1 FROM TIMESTAMP_TBL
- WHERE d1 > timestamp without time zone '1997-01-02';
+ WHERE d1 > timestamp without time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "15", d1 FROM TIMESTAMP_TBL
- WHERE d1 < timestamp without time zone '1997-01-02';
+ WHERE d1 < timestamp without time zone '1997-01-02' ORDER BY d1;
SELECT '' AS one, d1 FROM TIMESTAMP_TBL
- WHERE d1 = timestamp without time zone '1997-01-02';
+ WHERE d1 = timestamp without time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "63", d1 FROM TIMESTAMP_TBL
- WHERE d1 != timestamp without time zone '1997-01-02';
+ WHERE d1 != timestamp without time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "16", d1 FROM TIMESTAMP_TBL
- WHERE d1 <= timestamp without time zone '1997-01-02';
+ WHERE d1 <= timestamp without time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "49", d1 FROM TIMESTAMP_TBL
- WHERE d1 >= timestamp without time zone '1997-01-02';
+ WHERE d1 >= timestamp without time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "54", d1 - timestamp without time zone '1997-01-02' AS diff
- FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
+ FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01' ORDER BY d1;
SELECT '' AS date_trunc_week, date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc;
SELECT 'Wed Jul 11 10:51:14 PST-03:00 2001'::timestamptz;
SELECT 'Wed Jul 11 10:51:14 PST+03:00 2001'::timestamptz;
-SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL;
+SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL ORDER BY d1;
+ -- Check behavior at the lower boundary of the timestamp range
+ SELECT '4714-11-24 00:00:00+00 BC'::timestamptz;
+ SELECT '4714-11-23 16:00:00-08 BC'::timestamptz;
+ SELECT 'Sun Nov 23 16:00:00 4714 PST BC'::timestamptz;
+ SELECT '4714-11-23 23:59:59+00 BC'::timestamptz; -- out of range
+ -- The upper boundary differs between integer and float timestamps, so no check
+
-- Demonstrate functions and operators
SELECT '' AS "48", d1 FROM TIMESTAMPTZ_TBL
- WHERE d1 > timestamp with time zone '1997-01-02';
+ WHERE d1 > timestamp with time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "15", d1 FROM TIMESTAMPTZ_TBL
- WHERE d1 < timestamp with time zone '1997-01-02';
+ WHERE d1 < timestamp with time zone '1997-01-02' ORDER BY d1;
SELECT '' AS one, d1 FROM TIMESTAMPTZ_TBL
- WHERE d1 = timestamp with time zone '1997-01-02';
+ WHERE d1 = timestamp with time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "63", d1 FROM TIMESTAMPTZ_TBL
- WHERE d1 != timestamp with time zone '1997-01-02';
+ WHERE d1 != timestamp with time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "16", d1 FROM TIMESTAMPTZ_TBL
- WHERE d1 <= timestamp with time zone '1997-01-02';
+ WHERE d1 <= timestamp with time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "49", d1 FROM TIMESTAMPTZ_TBL
- WHERE d1 >= timestamp with time zone '1997-01-02';
+ WHERE d1 >= timestamp with time zone '1997-01-02' ORDER BY d1;
SELECT '' AS "54", d1 - timestamp with time zone '1997-01-02' AS diff
- FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
+ FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01' ORDER BY d1;
SELECT '' AS date_trunc_week, date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS week_trunc;
SELECT '' AS "54", d1 as timestamptz,
date_part( 'isoyear', d1) AS isoyear, date_part( 'week', d1) AS week,
date_part( 'dow', d1) AS dow
- FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
+ FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01' ORDER BY d1;
-- TO_CHAR()
-SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
- FROM TIMESTAMPTZ_TBL;
-
+SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
+
SELECT '' AS to_char_2, to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM')
- FROM TIMESTAMPTZ_TBL;
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
SELECT '' AS to_char_3, to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J')
- FROM TIMESTAMPTZ_TBL;
-
-SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
- FROM TIMESTAMPTZ_TBL;
-
-SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
- FROM TIMESTAMPTZ_TBL;
-
-SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
- FROM TIMESTAMPTZ_TBL;
-
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
+
+SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
+
+SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
+
+SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
+
SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS')
- FROM TIMESTAMPTZ_TBL;
-
-SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
- FROM TIMESTAMPTZ_TBL;
-
-SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
- FROM TIMESTAMPTZ_TBL;
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
+
+SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
+
+SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
SELECT '' AS to_char_10, to_char(d1, 'IYYY IYY IY I IW IDDD ID')
- FROM TIMESTAMPTZ_TBL;
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
SELECT '' AS to_char_11, to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID')
- FROM TIMESTAMPTZ_TBL;
+ FROM TIMESTAMPTZ_TBL ORDER BY d1;
+ -- Check OF with various zone offsets, particularly fractional hours
+ SET timezone = '00:00';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '+02:00';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '-13:00';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '-00:30';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '00:30';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '-04:30';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '04:30';
+ SELECT to_char(now(), 'OF');
+ RESET timezone;
+
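(OF prints the UTC offset of the session time zone; per the to_char format documentation, the minutes field should appear only for the fractional-hour zones above, such as -04:30, while whole-hour offsets render as just +02 or -13.)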
CREATE TABLE TIMESTAMPTZ_TST (a int , b timestamptz);
-- Test year field value with len > 4
SELECT ts_lexize('english_stem', 'skies');
SELECT ts_lexize('english_stem', 'identity');
-SELECT * FROM ts_token_type('default');
+SELECT * FROM ts_token_type('default') ORDER BY tokid;
SELECT * FROM ts_parse('default', '345
[email protected] '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005
[email protected] [email protected] [email protected] [email protected] qwe-wer asdf <fr>qwer jf sdjk<we hjwer <werrwe> ewr1> ewri2 <a href="qwe<qwe>">
/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234
-<i <b> wow < jqw <> qwerty');
+<i <b> wow < jqw <> qwerty') ORDER BY tokid,token;
SELECT to_tsvector('english', '345
[email protected] '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005
[email protected] [email protected] [email protected] [email protected] qwe-wer asdf <fr>qwer jf sdjk<we hjwer <werrwe> ewr1> ewri2 <a href="qwe<qwe>">
/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234
<i <b> wow < jqw <> qwerty');
insert into pendtest values (to_tsvector('Lore ipsum'));
select * from pendtest where 'ipsu:*'::tsquery @@ ts;
select * from pendtest where 'ipsa:*'::tsquery @@ ts;
-select * from pendtest where 'ips:*'::tsquery @@ ts;
+select * from pendtest where 'ips:*'::tsquery @@ ts ORDER BY 1;
select * from pendtest where 'ipt:*'::tsquery @@ ts;
select * from pendtest where 'ipi:*'::tsquery @@ ts;
+drop table pendtest;
+ --check OP_PHRASE on index
+ create temp table phrase_index_test(fts tsvector);
+ insert into phrase_index_test values ('A fat cat has just eaten a rat.');
+ insert into phrase_index_test values (to_tsvector('english', 'A fat cat has just eaten a rat.'));
+ create index phrase_index_test_idx on phrase_index_test using gin(fts);
+ set enable_seqscan = off;
+ select * from phrase_index_test where fts @@ phraseto_tsquery('english', 'fat cat');
+ set enable_seqscan = on;
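(Turning enable_seqscan off forces the planner onto the GIN index, so the phrase operator is exercised through the index machinery rather than evaluated over a sequential scan. For reference, the positional query that phraseto_tsquery builds looks like this in 9.6:)

SELECT phraseto_tsquery('english', 'fat cat');  -- yields 'fat' <-> 'cat'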
-- permissions checks
- CREATE USER view_user1;
- CREATE USER view_user2;
+ CREATE USER regress_view_user1;
+ CREATE USER regress_view_user2;
- SET SESSION AUTHORIZATION view_user1;
+ SET SESSION AUTHORIZATION regress_view_user1;
-CREATE TABLE base_tbl(a int, b text, c float);
+CREATE TABLE base_tbl(a int, b text, c float) DISTRIBUTE BY REPLICATION;
INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl;
INSERT INTO rw_view1 VALUES ('Row 2', 2.0, 2);
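(DISTRIBUTE BY REPLICATION is presumably used here so that every datanode holds the full base_tbl and the updatable-view tests behave the same regardless of how rows would otherwise be distributed across nodes.)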