Merge commit 'b5bce6c1ec6061c8a4f730d927e162db7e2ce365'
author    Pavan Deolasee <[email protected]>
Thu, 27 Oct 2016 15:02:55 +0000 (20:32 +0530)
committer Pavan Deolasee <[email protected]>
Thu, 27 Oct 2016 15:02:55 +0000 (20:32 +0530)
484 files changed:
.gitignore
configure
configure.in
contrib/Makefile
contrib/citext/Makefile
contrib/citext/expected/citext_1.out
contrib/citext/sql/citext.sql
contrib/hstore/expected/hstore.out
contrib/hstore/hstore_io.c
contrib/hstore/sql/hstore.sql
contrib/ltree/Makefile
contrib/pg_stat_statements/pg_stat_statements.c
contrib/pg_trgm/expected/pg_trgm.out
contrib/sepgsql/hooks.c
doc/bug.template
doc/src/sgml/auto-explain.sgml
doc/src/sgml/backup.sgml
doc/src/sgml/btree-gist.sgml
doc/src/sgml/catalogs.sgml
doc/src/sgml/config.sgml
doc/src/sgml/contrib.sgml
doc/src/sgml/datatype.sgml
doc/src/sgml/dblink.sgml
doc/src/sgml/ddl.sgml
doc/src/sgml/filelist.sgml
doc/src/sgml/func.sgml
doc/src/sgml/high-availability.sgml
doc/src/sgml/indices.sgml
doc/src/sgml/installation.sgml
doc/src/sgml/keywords.sgml
doc/src/sgml/legal.sgml
doc/src/sgml/libpq.sgml
doc/src/sgml/lobj.sgml
doc/src/sgml/ltree.sgml
doc/src/sgml/maintenance.sgml
doc/src/sgml/manage-ag.sgml
doc/src/sgml/mvcc.sgml
doc/src/sgml/pageinspect.sgml
doc/src/sgml/pgbuffercache.sgml
doc/src/sgml/pgstatstatements.sgml
doc/src/sgml/pgstattuple.sgml
doc/src/sgml/plpgsql.sgml
doc/src/sgml/pltcl.sgml
doc/src/sgml/postgres.sgml
doc/src/sgml/recovery-config.sgml
doc/src/sgml/ref/allfiles.sgml
doc/src/sgml/ref/alter_database.sgml
doc/src/sgml/ref/alter_large_object.sgml
doc/src/sgml/ref/alter_table.sgml
doc/src/sgml/ref/alter_trigger.sgml
doc/src/sgml/ref/alter_user_mapping.sgml
doc/src/sgml/ref/copy.sgml
doc/src/sgml/ref/create_aggregate.sgml
doc/src/sgml/ref/create_database.sgml
doc/src/sgml/ref/create_function.sgml
doc/src/sgml/ref/create_index.sgml
doc/src/sgml/ref/create_table.sgml
doc/src/sgml/ref/create_tablespace.sgml
doc/src/sgml/ref/create_user_mapping.sgml
doc/src/sgml/ref/create_view.sgml
doc/src/sgml/ref/drop_foreign_data_wrapper.sgml
doc/src/sgml/ref/drop_server.sgml
doc/src/sgml/ref/drop_trigger.sgml
doc/src/sgml/ref/drop_user_mapping.sgml
doc/src/sgml/ref/notify.sgml
doc/src/sgml/ref/pg_ctl-ref.sgml
doc/src/sgml/ref/pg_resetxlog.sgml
doc/src/sgml/ref/pgbench.sgml
doc/src/sgml/ref/pgupgrade.sgml
doc/src/sgml/ref/postgres-ref.sgml
doc/src/sgml/ref/select.sgml
doc/src/sgml/ref/update.sgml
doc/src/sgml/ref/vacuum.sgml
doc/src/sgml/ref/vacuumdb.sgml
doc/src/sgml/reference.sgml
doc/src/sgml/regress.sgml
doc/src/sgml/release.sgml
doc/src/sgml/rules.sgml
doc/src/sgml/runtime.sgml
doc/src/sgml/start.sgml
doc/src/sgml/trigger.sgml
doc/src/sgml/wal.sgml
doc/src/sgml/xaggr.sgml
doc/src/sgml/xfunc.sgml
src/Makefile
src/Makefile.global.in
src/Makefile.shlib
src/backend/Makefile
src/backend/access/common/heaptuple.c
src/backend/access/common/printtup.c
src/backend/access/hash/hashfunc.c
src/backend/access/heap/heapam.c
src/backend/access/heap/pruneheap.c
src/backend/access/heap/tuptoaster.c
src/backend/access/rmgrdesc/Makefile
src/backend/access/rmgrdesc/smgrdesc.c
src/backend/access/rmgrdesc/xactdesc.c
src/backend/access/transam/Makefile
src/backend/access/transam/clog.c
src/backend/access/transam/rmgr.c
src/backend/access/transam/slru.c
src/backend/access/transam/subtrans.c
src/backend/access/transam/transam.c
src/backend/access/transam/twophase.c
src/backend/access/transam/varsup.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xlogutils.c
src/backend/bootstrap/bootstrap.c
src/backend/catalog/Makefile
src/backend/catalog/catalog.c
src/backend/catalog/dependency.c
src/backend/catalog/genbki.pl
src/backend/catalog/heap.c
src/backend/catalog/index.c
src/backend/catalog/namespace.c
src/backend/catalog/pg_aggregate.c
src/backend/catalog/pg_proc.c
src/backend/catalog/storage.c
src/backend/commands/aggregatecmds.c
src/backend/commands/analyze.c
src/backend/commands/cluster.c
src/backend/commands/comment.c
src/backend/commands/copy.c
src/backend/commands/dbcommands.c
src/backend/commands/event_trigger.c
src/backend/commands/explain.c
src/backend/commands/extension.c
src/backend/commands/foreigncmds.c
src/backend/commands/indexcmds.c
src/backend/commands/matview.c
src/backend/commands/portalcmds.c
src/backend/commands/prepare.c
src/backend/commands/schemacmds.c
src/backend/commands/sequence.c
src/backend/commands/tablecmds.c
src/backend/commands/tablespace.c
src/backend/commands/trigger.c
src/backend/commands/vacuum.c
src/backend/commands/variable.c
src/backend/commands/view.c
src/backend/executor/Makefile
src/backend/executor/execAmi.c
src/backend/executor/execCurrent.c
src/backend/executor/execMain.c
src/backend/executor/execProcnode.c
src/backend/executor/execQual.c
src/backend/executor/execTuples.c
src/backend/executor/execUtils.c
src/backend/executor/functions.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeForeignscan.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeNestloop.c
src/backend/executor/nodeSubplan.c
src/backend/executor/nodeWindowAgg.c
src/backend/executor/spi.c
src/backend/libpq/be-fsstubs.c
src/backend/libpq/be-secure.c
src/backend/main/main.c
src/backend/nodes/bitmapset.c
src/backend/nodes/copyfuncs.c
src/backend/nodes/equalfuncs.c
src/backend/nodes/makefuncs.c
src/backend/nodes/nodeFuncs.c
src/backend/nodes/outfuncs.c
src/backend/nodes/print.c
src/backend/nodes/readfuncs.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/plan/createplan.c
src/backend/optimizer/plan/planagg.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/plan/setrefs.c
src/backend/optimizer/plan/subselect.c
src/backend/optimizer/prep/prepjointree.c
src/backend/optimizer/prep/preptlist.c
src/backend/optimizer/prep/prepunion.c
src/backend/optimizer/util/pathnode.c
src/backend/optimizer/util/plancat.c
src/backend/optimizer/util/relnode.c
src/backend/parser/analyze.c
src/backend/parser/gram.y
src/backend/parser/parse_agg.c
src/backend/parser/parse_clause.c
src/backend/parser/parse_coerce.c
src/backend/parser/parse_expr.c
src/backend/parser/parse_relation.c
src/backend/parser/parse_target.c
src/backend/parser/parse_type.c
src/backend/parser/parse_utilcmd.c
src/backend/parser/parser.c
src/backend/parser/scan.l
src/backend/postmaster/autovacuum.c
src/backend/postmaster/pgstat.c
src/backend/postmaster/postmaster.c
src/backend/replication/logical/decode.c
src/backend/replication/logical/logicalfuncs.c
src/backend/replication/syncrep.c
src/backend/rewrite/rewriteHandler.c
src/backend/rewrite/rowsecurity.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/file/fd.c
src/backend/storage/file/reinit.c
src/backend/storage/ipc/ipci.c
src/backend/storage/ipc/procarray.c
src/backend/storage/ipc/procsignal.c
src/backend/storage/lmgr/lmgr.c
src/backend/storage/lmgr/lock.c
src/backend/storage/lmgr/lwlock.c
src/backend/storage/lmgr/predicate.c
src/backend/storage/lmgr/proc.c
src/backend/tcop/dest.c
src/backend/tcop/postgres.c
src/backend/tcop/pquery.c
src/backend/tcop/utility.c
src/backend/utils/adt/array_userfuncs.c
src/backend/utils/adt/arrayfuncs.c
src/backend/utils/adt/date.c
src/backend/utils/adt/dbsize.c
src/backend/utils/adt/float.c
src/backend/utils/adt/json.c
src/backend/utils/adt/jsonb.c
src/backend/utils/adt/lockfuncs.c
src/backend/utils/adt/misc.c
src/backend/utils/adt/numeric.c
src/backend/utils/adt/pseudotypes.c
src/backend/utils/adt/ri_triggers.c
src/backend/utils/adt/rowtypes.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/selfuncs.c
src/backend/utils/adt/timestamp.c
src/backend/utils/adt/varlena.c
src/backend/utils/adt/version.c
src/backend/utils/cache/inval.c
src/backend/utils/cache/lsyscache.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/syscache.c
src/backend/utils/errcodes.txt
src/backend/utils/error/elog.c
src/backend/utils/init/globals.c
src/backend/utils/init/miscinit.c
src/backend/utils/init/postinit.c
src/backend/utils/misc/guc.c
src/backend/utils/misc/postgresql.conf.sample
src/backend/utils/mmgr/mcxt.c
src/backend/utils/mmgr/portalmem.c
src/backend/utils/resowner/resowner.c
src/backend/utils/sort/tuplesort.c
src/backend/utils/sort/tuplestore.c
src/backend/utils/time/combocid.c
src/backend/utils/time/snapmgr.c
src/backend/utils/time/tqual.c
src/bin/Makefile
src/bin/initdb/initdb.c
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dump.h
src/bin/pg_dump/pg_dumpall.c
src/bin/pg_rewind/filemap.c
src/bin/pg_xlogdump/rmgrdesc.c
src/bin/pgbench/pgbench.c
src/bin/psql/command.c
src/bin/psql/describe.c
src/bin/psql/startup.c
src/bin/psql/tab-complete.c
src/common/Makefile
src/common/relpath.c
src/include/Makefile
src/include/access/hash.h
src/include/access/htup.h
src/include/access/rmgrlist.h
src/include/access/sysattr.h
src/include/access/transam.h
src/include/access/twophase.h
src/include/access/xact.h
src/include/access/xlog.h
src/include/bootstrap/bootstrap.h
src/include/c.h
src/include/catalog/catalog.h
src/include/catalog/dependency.h
src/include/catalog/heap.h
src/include/catalog/indexing.h
src/include/catalog/namespace.h
src/include/catalog/pg_aggregate.h
src/include/catalog/pg_class.h
src/include/catalog/pg_namespace.h
src/include/catalog/pg_proc.h
src/include/catalog/pg_type.h
src/include/commands/dbcommands.h
src/include/commands/explain.h
src/include/commands/prepare.h
src/include/commands/schemacmds.h
src/include/commands/sequence.h
src/include/commands/tablecmds.h
src/include/commands/trigger.h
src/include/commands/vacuum.h
src/include/commands/variable.h
src/include/common/relpath.h
src/include/executor/execdesc.h
src/include/executor/executor.h
src/include/executor/spi.h
src/include/executor/tuptable.h
src/include/libpq/libpq-be.h
src/include/miscadmin.h
src/include/nodes/bitmapset.h
src/include/nodes/execnodes.h
src/include/nodes/nodes.h
src/include/nodes/params.h
src/include/nodes/parsenodes.h
src/include/nodes/pg_list.h
src/include/nodes/plannodes.h
src/include/nodes/primnodes.h
src/include/nodes/relation.h
src/include/optimizer/cost.h
src/include/optimizer/pathnode.h
src/include/optimizer/planmain.h
src/include/optimizer/planner.h
src/include/parser/analyze.h
src/include/parser/gramparse.h
src/include/parser/kwlist.h
src/include/parser/parse_agg.h
src/include/parser/parse_func.h
src/include/parser/parse_relation.h
src/include/parser/parse_utilcmd.h
src/include/parser/parser.h
src/include/parser/scanner.h
src/include/pg_config.h.in
src/include/pg_config.h.win32
src/include/pgstat.h
src/include/port.h
src/include/postgres.h
src/include/postmaster/autovacuum.h
src/include/rewrite/rewriteHandler.h
src/include/storage/backendid.h
src/include/storage/lock.h
src/include/storage/lwlock.h
src/include/storage/proc.h
src/include/storage/procarray.h
src/include/storage/procsignal.h
src/include/storage/relfilenode.h
src/include/storage/smgr.h
src/include/tcop/dest.h
src/include/tcop/pquery.h
src/include/tcop/tcopprot.h
src/include/tcop/utility.h
src/include/utils/builtins.h
src/include/utils/elog.h
src/include/utils/guc.h
src/include/utils/guc_tables.h
src/include/utils/json.h
src/include/utils/lsyscache.h
src/include/utils/plancache.h
src/include/utils/portal.h
src/include/utils/rel.h
src/include/utils/resowner_private.h
src/include/utils/snapshot.h
src/include/utils/syscache.h
src/include/utils/timestamp.h
src/include/utils/tuplesort.h
src/include/utils/tuplestore.h
src/interfaces/libpq/fe-auth.c
src/pl/plpgsql/src/pl_exec.c
src/pl/plpgsql/src/pl_gram.y
src/port/getpeereid.c
src/test/regress/expected/aggregates.out
src/test/regress/expected/alter_generic.out
src/test/regress/expected/alter_table.out
src/test/regress/expected/arrays.out
src/test/regress/expected/box.out
src/test/regress/expected/brin.out
src/test/regress/expected/case.out
src/test/regress/expected/cluster.out
src/test/regress/expected/copy2.out
src/test/regress/expected/create_index.out
src/test/regress/expected/create_table_like.out
src/test/regress/expected/create_view.out
src/test/regress/expected/date.out
src/test/regress/expected/domain.out
src/test/regress/expected/drop_if_exists.out
src/test/regress/expected/event_trigger.out
src/test/regress/expected/float8.out
src/test/regress/expected/foreign_data.out
src/test/regress/expected/foreign_key.out
src/test/regress/expected/gist.out
src/test/regress/expected/groupingsets.out
src/test/regress/expected/inherit.out
src/test/regress/expected/insert.out
src/test/regress/expected/insert_conflict.out
src/test/regress/expected/int2.out
src/test/regress/expected/int4.out
src/test/regress/expected/int8.out
src/test/regress/expected/join.out
src/test/regress/expected/matview.out
src/test/regress/expected/name.out
src/test/regress/expected/numeric.out
src/test/regress/expected/object_address.out
src/test/regress/expected/opr_sanity.out
src/test/regress/expected/plancache.out
src/test/regress/expected/plpgsql.out
src/test/regress/expected/prepared_xacts.out
src/test/regress/expected/rangefuncs.out
src/test/regress/expected/rangetypes.out
src/test/regress/expected/replica_identity.out
src/test/regress/expected/roleattributes.out
src/test/regress/expected/rolenames.out
src/test/regress/expected/rowsecurity.out
src/test/regress/expected/rowtypes.out
src/test/regress/expected/rules.out
src/test/regress/expected/sanity_check.out
src/test/regress/expected/select.out
src/test/regress/expected/select_distinct.out
src/test/regress/expected/select_into.out
src/test/regress/expected/sequence.out
src/test/regress/expected/sequence_1.out
src/test/regress/expected/stats.out
src/test/regress/expected/tablesample.out
src/test/regress/expected/text.out
src/test/regress/expected/timestamp.out
src/test/regress/expected/timestamptz.out
src/test/regress/expected/transactions.out
src/test/regress/expected/triggers.out
src/test/regress/expected/tsearch.out
src/test/regress/expected/txid.out
src/test/regress/expected/updatable_views.out
src/test/regress/expected/vacuum.out
src/test/regress/expected/with.out
src/test/regress/expected/xml.out
src/test/regress/expected/xml_1.out
src/test/regress/input/create_function_2.source
src/test/regress/input/largeobject.source
src/test/regress/output/constraints.source
src/test/regress/output/create_function_2.source
src/test/regress/output/misc.source
src/test/regress/output/tablespace.source
src/test/regress/parallel_schedule
src/test/regress/pg_regress.c
src/test/regress/serial_schedule
src/test/regress/sql/aggregates.sql
src/test/regress/sql/alter_table.sql
src/test/regress/sql/arrays.sql
src/test/regress/sql/box.sql
src/test/regress/sql/case.sql
src/test/regress/sql/cluster.sql
src/test/regress/sql/create_index.sql
src/test/regress/sql/create_table.sql
src/test/regress/sql/create_table_like.sql
src/test/regress/sql/date.sql
src/test/regress/sql/domain.sql
src/test/regress/sql/float8.sql
src/test/regress/sql/foreign_data.sql
src/test/regress/sql/foreign_key.sql
src/test/regress/sql/inherit.sql
src/test/regress/sql/insert.sql
src/test/regress/sql/insert_conflict.sql
src/test/regress/sql/int2.sql
src/test/regress/sql/int4.sql
src/test/regress/sql/int8.sql
src/test/regress/sql/join.sql
src/test/regress/sql/matview.sql
src/test/regress/sql/name.sql
src/test/regress/sql/numeric.sql
src/test/regress/sql/opr_sanity.sql
src/test/regress/sql/plpgsql.sql
src/test/regress/sql/portals.sql
src/test/regress/sql/prepared_xacts.sql
src/test/regress/sql/privileges.sql
src/test/regress/sql/rangefuncs.sql
src/test/regress/sql/rangetypes.sql
src/test/regress/sql/rowsecurity.sql
src/test/regress/sql/rowtypes.sql
src/test/regress/sql/rules.sql
src/test/regress/sql/select.sql
src/test/regress/sql/select_distinct.sql
src/test/regress/sql/sequence.sql
src/test/regress/sql/timestamp.sql
src/test/regress/sql/timestamptz.sql
src/test/regress/sql/triggers.sql
src/test/regress/sql/tsearch.sql
src/test/regress/sql/updatable_views.sql
src/test/regress/sql/vacuum.sql
src/test/regress/sql/with.sql
src/test/regress/sql/xml.sql

diff --cc .gitignore
Simple merge
diff --cc configure
index 8711af0d3a596912f4c5d80b4ccb2a84774e5e11,7244c755a76378e72e947f2aa6dd39c69011c64c..cbfcb1815723526b8a0aa991f8a58a77b03f9f80
+++ b/configure
@@@ -1,8 -1,8 +1,8 @@@
  #! /bin/sh
  # Guess values for system-dependent variables and create Makefiles.
- # Generated by GNU Autoconf 2.69 for PostgreSQL 9.5alpha1 (Postgres-XL 9.5alpha1).
 -# Generated by GNU Autoconf 2.69 for PostgreSQL 9.6beta4.
++# Generated by GNU Autoconf 2.69 for PostgreSQL 9.6beta4 (Postgres-XL 9.6alpha1).
  #
 -# Report bugs to <pgsql-bugs@postgresql.org>.
 +# Report bugs to <bugs@postgres-xl.org>.
  #
  #
  # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@@ -582,10 -582,9 +582,10 @@@ MAKEFLAGS
  # Identity of this package.
  PACKAGE_NAME='PostgreSQL'
  PACKAGE_TARNAME='postgresql'
- PACKAGE_VERSION='9.5alpha1 (Postgres-XL 9.5alpha1)'
- PACKAGE_XC_VERSION='9.5alpha1'
- PACKAGE_STRING='PostgreSQL 9.5alpha1 (Postgres-XL 9.5alpha1)'
 -PACKAGE_VERSION='9.6beta4'
 -PACKAGE_STRING='PostgreSQL 9.6beta4'
 -PACKAGE_BUGREPORT='[email protected]'
++PACKAGE_VERSION='9.6beta4 (Postgres-XL 9.6alpha1)'
++PACKAGE_XC_VERSION='9.6alpha1'
++PACKAGE_STRING='PostgreSQL 9.6beta4 (Postgres-XL 9.6alpha1)'
 +PACKAGE_BUGREPORT='[email protected]'
  PACKAGE_URL=''
  
  ac_unique_file="src/backend/access/common/heaptuple.c"
@@@ -1398,7 -1398,7 +1402,7 @@@ if test "$ac_init_help" = "long"; the
    # Omit some internal or obsolete options to make the list less imposing.
    # This message is too long to be a string in the A/UX 3.1 sh.
    cat <<_ACEOF
- \`configure' configures PostgreSQL 9.5alpha1 (Postgres-XL 9.5alpha1) to adapt to many kinds of systems.
 -\`configure' configures PostgreSQL 9.6beta4 to adapt to many kinds of systems.
++\`configure' configures PostgreSQL 9.6beta4 (Postgres-XL 9.6alpha1) to adapt to many kinds of systems.
  
  Usage: $0 [OPTION]... [VAR=VALUE]...
  
  
  if test -n "$ac_init_help"; then
    case $ac_init_help in
-      short | recursive ) echo "Configuration of PostgreSQL 9.5alpha1 (Postgres-XL 9.5alpha1):";;
 -     short | recursive ) echo "Configuration of PostgreSQL 9.6beta4:";;
++     short | recursive ) echo "Configuration of PostgreSQL 9.6beta4 (Postgres-XL 9.6alpha1):";;
     esac
    cat <<\_ACEOF
  
  test -n "$ac_init_help" && exit $ac_status
  if $ac_init_version; then
    cat <<\_ACEOF
- PostgreSQL configure 9.5alpha1 (Postgres-XL 9.5alpha1)
 -PostgreSQL configure 9.6beta4
++PostgreSQL configure 9.6beta4 (Postgres-XL 9.6alpha1)
  generated by GNU Autoconf 2.69
  
  Copyright (C) 2012 Free Software Foundation, Inc.
@@@ -2324,7 -2326,7 +2330,7 @@@ cat >config.log <<_ACEO
  This file contains any messages produced by compilers while
  running configure, to aid debugging if configure makes a mistake.
  
- It was created by PostgreSQL $as_me 9.5alpha1 (Postgres-XL 9.5alpha1), which was
 -It was created by PostgreSQL $as_me 9.6beta4, which was
++It was created by PostgreSQL $as_me 9.6beta4 (Postgres-XL 9.6alpha1), which was
  generated by GNU Autoconf 2.69.  Invocation command line was
  
    $ $0 $@
@@@ -15538,10 -15873,8 +15920,11 @@@ cat >>confdefs.h <<_ACEO
  _ACEOF
  
  
 +# For PGXC, set -DPGXC by default. This can be overriden with -UPGXC if the user sets it.
 +# For Postgres-XL, set both -DPGXC and -DXCP  
 +CFLAGS="-DPGXC -DXCP $CFLAGS"
  
  # Begin output steps
  
  { $as_echo "$as_me:${as_lineno-$LINENO}: using compiler=$cc_string" >&5
@@@ -16100,7 -16433,7 +16483,7 @@@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_wr
  # report actual input values of CONFIG_FILES etc. instead of their
  # values after options handling.
  ac_log="
- This file was extended by PostgreSQL $as_me 9.5alpha1 (Postgres-XL 9.5alpha1), which was
 -This file was extended by PostgreSQL $as_me 9.6beta4, which was
++This file was extended by PostgreSQL $as_me 9.6beta4 (Postgres-XL 9.6alpha1), which was
  generated by GNU Autoconf 2.69.  Invocation command line was
  
    CONFIG_FILES    = $CONFIG_FILES
@@@ -16170,7 -16503,7 +16553,7 @@@ _ACEO
  cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
  ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
  ac_cs_version="\\
- PostgreSQL config.status 9.5alpha1 (Postgres-XL 9.5alpha1)
 -PostgreSQL config.status 9.6beta4
++PostgreSQL config.status 9.6beta4 (Postgres-XL 9.6alpha1)
  configured by $0, generated by GNU Autoconf 2.69,
    with options \\"\$ac_cs_config\\"
  
diff --cc configure.in
index 17b061146e02d447b6262d2aef7811700bf54b13,598fbd8f640770165840bca83ed6a68dd25b4c93..3c77bebcdce6385571f759e85f3fc6fe1e1cc4a5
@@@ -17,7 -17,7 +17,7 @@@ dnl Read the Autoconf manual for detail
  dnl
  m4_pattern_forbid(^PGAC_)dnl to catch undefined macros
  
- AC_INIT([PostgreSQL], [9.5alpha1 (Postgres-XL 9.5alpha1)], [[email protected]])
 -AC_INIT([PostgreSQL], [9.6beta4], [[email protected]])
++AC_INIT([PostgreSQL], [9.6beta4 (Postgres-XL 9.6alpha1)], [[email protected]])
  
  m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required.
  Untested combinations of 'autoconf' and PostgreSQL versions are not
index 7ed90d4df734e742e162732c634f5bbcbc038228,25263c0be9494a5ee7943190088e184e4ebcb3cd..fedc61b243eb9fe4279df2d55e9e3f8cc1f3f13f
@@@ -37,9 -38,7 +38,10 @@@ SUBDIRS = 
                pgcrypto        \
                pgrowlocks      \
                pgstattuple     \
 +              pgxc_clean      \
 +              pgxc_ctl        \
 +              pgxc_monitor \
+               pg_visibility   \
                postgres_fdw    \
                seg             \
                spi             \
index 7222731f3a79a69cf26c76b7e146236debe1ecca,e39d3eee61bb5b59f72d0aae452acc7340c38783..3623f9d91ce695d685d70d015ecf9c676b8e1d83
mode 100755,100644..100755
@@@ -3,10 -3,11 +3,11 @@@
  MODULES = citext
  
  EXTENSION = citext
- DATA = citext--1.1.sql citext--1.0--1.1.sql citext--unpackaged--1.0.sql
+ DATA = citext--1.3.sql citext--1.2--1.3.sql citext--1.1--1.2.sql \
+       citext--1.0--1.1.sql citext--unpackaged--1.0.sql
  PGFILEDESC = "citext - case-insensitive character string data type"
  
 -REGRESS = citext
 +REGRESS = citext xl_citext
  
  ifdef USE_PGXS
  PG_CONFIG = pg_config
index d2791a5eb4695d65779cf1eb11e12e2a0b73b960,462d42a3bd822b2b4625ab1d8b1e9f1de09bd6bf..6515fbd571eed16e645377183e2f9d5e53864577
@@@ -519,19 -502,17 +502,17 @@@ SELECT name FROM srt WHERE name !~ 'A$
  (3 rows)
  
  -- SIMILAR TO should be case-insensitive.
 -SELECT name FROM srt WHERE name SIMILAR TO '%a.*';
 +SELECT name FROM srt WHERE name SIMILAR TO '%a.*' order by name;
   name 
  ------
-  AAA
-  aba
- (2 rows)
+  ABA
+ (1 row)
  
 -SELECT name FROM srt WHERE name SIMILAR TO '%A.*';
 +SELECT name FROM srt WHERE name SIMILAR TO '%A.*' order by name;
   name 
  ------
-  AAA
-  aba
- (2 rows)
+  ABA
+ (1 row)
  
  -- Explicit casts.
  SELECT true::citext = 'true' AS t;
Simple merge
Simple merge
Simple merge
Simple merge
index 342e75ee48d30470d6e103bebcb71917d8b3381d,c101603e6cdd11cee7816cde5045b71226f1c010..39ecd6080424ddd2cd046b99a7008e8e617ccf03
mode 100755,100644..100755
@@@ -6,10 -6,10 +6,10 @@@ OBJS =        ltree_io.o ltree_op.o lquery_op
  PG_CPPFLAGS = -DLOWER_NODE
  
  EXTENSION = ltree
- DATA = ltree--1.0.sql ltree--unpackaged--1.0.sql
+ DATA = ltree--1.1.sql ltree--1.0--1.1.sql ltree--unpackaged--1.0.sql
  PGFILEDESC = "ltree - hierarchical label data type"
  
 -REGRESS = ltree
 +REGRESS = ltree xl_ltree
  
  ifdef USE_PGXS
  PG_CONFIG = pg_config
Simple merge
Simple merge
index 561f4216a96d3e435edbc2df00a87e715ad261d4,55347721968e3f59cf5bc55a08d433fcc1f51552..b0ec8a3f02c4a7c68470e9af3073d5ebfbd9f0f3
@@@ -27,7 -27,7 +27,7 @@@ System Configuration
  
    Operating System (example: Linux 2.4.18)    :
  
-   PostgreSQL version (example: PostgreSQL 9.5alpha1):  Postgres-XL 9.5alpha1
 -  PostgreSQL version (example: PostgreSQL 9.6beta4):  PostgreSQL 9.6beta4
++  PostgreSQL version (example: PostgreSQL 9.6beta4):  Postgres-XL 9.6alpha1
  
    Compiler used (example: gcc 3.3.5)          :
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 2752ac850f544a04399dcd84b5bc40059cfee8ba,c8708ecf8bbe923339f2565e462f442c0363e62d..fdc7a0753474cb516d11ab2f68b1fe1f1fad5d46
@@@ -132,10 -133,7 +133,11 @@@ CREATE EXTENSION <replaceable>module_na
   &pgstatstatements;
   &pgstattuple;
   &pgtrgm;
 + &pgxcclean;
 + &pgxcctl;
 + &pgxcddl;
 + &pgxcmonitor;
+  &pgvisibility;
   &postgres-fdw;
   &seg;
   &sepgsql;
Simple merge
Simple merge
index 70af9e066473a46e8354eac07c0150818b8af3b5,a393813b3809df196fed11dcb266bb4950e14823..f7841719831b4232f8ae90c0e2a833914fa88ee7
mode 100755,100644..100755
@@@ -737,14 -635,16 +728,24 @@@ CREATE TABLE example 
      usually best to follow it.
     </para>
  
 +   <para>
 +    As mentioned when discussing <type>UNIQUE</> constraint, the distribution column
 +    must be included in <type>PRIMARY KEY</type>.  Other restrictions
 +    apply to the <type>PRIMARY KEY</> as well.  When an expression is used on
 +    a <type>PRIMARY KEY</> constraint, this expression must contain
 +    the distribution column of its parent table. It cannot use other
 +    columns as well.
 +   </para>
+    <para>
+     Primary keys are useful both for
+     documentation purposes and for client applications.  For example,
+     a GUI application that allows modifying row values probably needs
+     to know the primary key of a table to be able to identify rows
+     uniquely.  There are also various ways in which the database system
+     makes use of a primary key if one has been declared; for example,
+     the primary key defines the default target column(s) for foreign keys
+     referencing its table.
+    </para>
    </sect2>
  
    <sect2 id="ddl-constraints-fk">
index 3e7a67ef9b73fc445189b252b1fab9e46da73026,43837114baee804cf39c9e7c2024bb1b884f9f4b..eba66890885cf249492788fb580fcf77bc93a1de
  <!ENTITY pgstatstatements SYSTEM "pgstatstatements.sgml">
  <!ENTITY pgstattuple     SYSTEM "pgstattuple.sgml">
  <!ENTITY pgtrgm          SYSTEM "pgtrgm.sgml">
+ <!ENTITY pgvisibility    SYSTEM "pgvisibility.sgml">
  <!ENTITY postgres-fdw    SYSTEM "postgres-fdw.sgml">
 +<!ENTITY pgxcclean       SYSTEM "pgxcclean.sgml">
 +<!ENTITY pgxcctl         SYSTEM "pgxc_ctl-ref.sgml">
 +<!ENTITY pgxcddl         SYSTEM "pgxcddl.sgml">
 +<!ENTITY pgxcmonitor     SYSTEM "pgxcmonitor.sgml">
  <!ENTITY seg             SYSTEM "seg.sgml">
  <!ENTITY contrib-spi     SYSTEM "contrib-spi.sgml">
  <!ENTITY sepgsql         SYSTEM "sepgsql.sgml">
  <!ENTITY sourcerepo SYSTEM "sourcerepo.sgml">
  
  <!ENTITY release    SYSTEM "release.sgml">
+ <!ENTITY release-9.6    SYSTEM "release-9.6.sgml">
 +<!ENTITY release-xl-9.5r1    SYSTEM "release-xl-9.5r1.sgml">
  <!ENTITY release-9.5    SYSTEM "release-9.5.sgml">
  <!ENTITY release-9.4    SYSTEM "release-9.4.sgml">
  <!ENTITY release-9.3    SYSTEM "release-9.3.sgml">
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 78092caeb37bdc74cba126158e8ce98c3adfc34b,84bc7beb5adf4dfabbb3c6a16f692cab8e896dfb..ae7f5a7d317f198d9409c40f3824a30e452f94aa
@@@ -1,27 -1,11 +1,27 @@@
  <!-- doc/src/sgml/legal.sgml -->
  
- <date>2015</date>
+ <date>2016</date>
  
  <copyright>
-  <year>1996-2015</year>
+  <year>1996-2016</year>
   <holder>The PostgreSQL Global Development Group</holder>
  </copyright>
 +<copyright>
 + <year>2014-2016</year>
 + <holder>Postgres-XL Development Group</holder>
 +</copyright>
 +<copyright>
 + <year>2009-2012</year>
 + <holder>Postgres-XC Development Group</holder>
 +</copyright>
 +<copyright>
 + <year>2012-2014</year>
 + <holder>TransLattice, Inc.</holder>
 +</copyright>
 +<copyright>
 + <year>2015-2016</year>
 + <holder>2ndQuadrant Ltd</holder>
 +</copyright>
  
  <legalnotice id="legalnotice">
   <title>Legal Notice</title>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 0deb459f1fd3dc34c3596ff0fc0cf4333a11525d,77667bdebd1e68028658a0274da3ad32e89b4def..0c5e3b350e0f23ad0267e2fda4deadd41b931b16
@@@ -54,8 -52,8 +54,9 @@@ Complete list of usable sgml source fil
  <!ENTITY commit             SYSTEM "commit.sgml">
  <!ENTITY commitPrepared     SYSTEM "commit_prepared.sgml">
  <!ENTITY copyTable          SYSTEM "copy.sgml">
+ <!ENTITY createAccessMethod SYSTEM "create_access_method.sgml">
  <!ENTITY createAggregate    SYSTEM "create_aggregate.sgml">
 +<!ENTITY createBarrier      system "create_barrier.sgml">
  <!ENTITY createCast         SYSTEM "create_cast.sgml">
  <!ENTITY createCollation    SYSTEM "create_collation.sgml">
  <!ENTITY createConversion   SYSTEM "create_conversion.sgml">
Simple merge
index e5a677925dba0ea4f743bdb2972dda304df08f26,6f51cbc8962cc674bbaccbf51315cfcc0ed14701..8deb80ab63c052eb7eac0c4aad3866437736b070
mode 100755,100644..100755
Simple merge
Simple merge
index c8ebcac1c691fc656039d15471c46c2faca96ada,6a8acfb4f9ce2c396e386f9ca2dfbe07e142d1f4..c676b82b349d57630453915757f874873c110029
@@@ -28,8 -27,10 +28,11 @@@ CREATE AGGREGATE <replaceable class="pa
      [ , SSPACE = <replaceable class="PARAMETER">state_data_size</replaceable> ]
      [ , FINALFUNC = <replaceable class="PARAMETER">ffunc</replaceable> ]
      [ , FINALFUNC_EXTRA ]
+     [ , COMBINEFUNC = <replaceable class="PARAMETER">combinefunc</replaceable> ]
+     [ , SERIALFUNC = <replaceable class="PARAMETER">serialfunc</replaceable> ]
+     [ , DESERIALFUNC = <replaceable class="PARAMETER">deserialfunc</replaceable> ]
      [ , INITCOND = <replaceable class="PARAMETER">initial_condition</replaceable> ]
 +    [ , INITCOLLECT = <replaceable class="PARAMETER">initial_collection_condition</replaceable> ]
      [ , MSFUNC = <replaceable class="PARAMETER">msfunc</replaceable> ]
      [ , MINVFUNC = <replaceable class="PARAMETER">minvfunc</replaceable> ]
      [ , MSTYPE = <replaceable class="PARAMETER">mstate_data_type</replaceable> ]
@@@ -49,7 -50,7 +53,8 @@@ CREATE AGGREGATE <replaceable class="pa
      [ , FINALFUNC = <replaceable class="PARAMETER">ffunc</replaceable> ]
      [ , FINALFUNC_EXTRA ]
      [ , INITCOND = <replaceable class="PARAMETER">initial_condition</replaceable> ]
 +    [ , INITCOLLECT = <replaceable class="PARAMETER">initial_collection_condition</replaceable> ]
+     [ , PARALLEL = { SAFE | RESTRICTED | UNSAFE } ]
      [ , HYPOTHETICAL ]
  )
  
@@@ -63,8 -63,10 +68,11 @@@ CREATE AGGREGATE <replaceable class="PA
      [ , SSPACE = <replaceable class="PARAMETER">state_data_size</replaceable> ]
      [ , FINALFUNC = <replaceable class="PARAMETER">ffunc</replaceable> ]
      [ , FINALFUNC_EXTRA ]
+     [ , COMBINEFUNC = <replaceable class="PARAMETER">combinefunc</replaceable> ]
+     [ , SERIALFUNC = <replaceable class="PARAMETER">serialfunc</replaceable> ]
+     [ , DESERIALFUNC = <replaceable class="PARAMETER">deserialfunc</replaceable> ]
      [ , INITCOND = <replaceable class="PARAMETER">initial_condition</replaceable> ]
 +    [ , INITCOLLECT = <replaceable class="PARAMETER">initial_collection_condition</replaceable> ]
      [ , MSFUNC = <replaceable class="PARAMETER">msfunc</replaceable> ]
      [ , MINVFUNC = <replaceable class="PARAMETER">minvfunc</replaceable> ]
      [ , MSTYPE = <replaceable class="PARAMETER">mstate_data_type</replaceable> ]
Simple merge
Simple merge
Simple merge
index 0c9b752c95c0620db902ca65336017b0ac471728,bf2ad64d66e3a40011a06a9904bce0a70aae09d6..2b842d5fb980b7d71e787d3e7952c15f74605a02
mode 100755,100644..100755
@@@ -544,16 -529,12 +551,18 @@@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORAR
       </para>
  
       <para>
-       The primary key constraint should name a set of columns that is
-       different from other sets of columns named by any unique
-       constraint defined for the same table.
+       <literal>PRIMARY KEY</literal> enforces the same data constraints as
+       a combination of <literal>UNIQUE</> and <literal>NOT NULL</>, but
+       identifying a set of columns as the primary key also provides metadata
+       about the design of the schema, since a primary key implies that other
+       tables can rely on this set of columns as a unique identifier for rows.
       </para>
 +
 +     <para>
 +      In <productname>Postgres-XL</>, if <command>DISTRIBUTE BY REPLICATION</> is not specified, the
 +      distribution key must be included in the set of primary key
 +      columns.
 +     </para>
      </listitem>
     </varlistentry>
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index de64adf78944e4c6eee8de57640c3331bbd552da,fd9d0be6f44c40a567d69896dbe934bb069bf076..e72021764a349e73a3cc534360b5241f043f1d9b
@@@ -222,12 -281,13 +281,19 @@@ PostgreSQL documentatio
     <command>pg_resetxlog</command> to run.  But before you do
     so, make doubly certain that there is no server process still alive.
    </para>
 +
 +  <para>
 +   In <productname>Postgres-XL</>, <command>pg_resetxlog</command>
 +   will only run locally for Coordinators and Datanodes.  You should run it
 +   for each Coordinator or Datanode manually.
 +  </para>
   </refsect1>
  
+  <refsect1>
+   <title>See Also</title>
+   <simplelist type="inline">
+    <member><xref linkend="app-pgcontroldata"></member>
+   </simplelist>
+  </refsect1>
  </refentry>
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 1d71d95d473a90801ee87a969880cd99e7db60ef,8acdff1393fd2d1460f5e707c8ada4840bdf8620..adfd95aea019a881d8959450a6b72c5400a1c6e0
@@@ -82,8 -80,8 +82,9 @@@
     &commit;
     &commitPrepared;
     &copyTable;
+    &createAccessMethod;
     &createAggregate;
 +   &createBarrier;
     &createCast;
     &createCollation;
     &createConversion;
Simple merge
index 70e53afc99c0e39bc61274a7346403b1746baf7e,472c1f6f128d15135f48c1384b467cd67c7c2ae7..eace76cace728b9b586c1e4ad7c789283149b94f
@@@ -73,7 -73,7 +73,8 @@@ For new features, add links to the docu
    The reason for splitting the release notes this way is so that appropriate
    subsets can easily be copied into back branches.
  -->
+ &release-9.6;
 +&release-xl-9.5r1;
  &release-9.5;
  &release-9.4;
  &release-9.3;
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc src/Makefile
index b2295597bf92176ad89fcc0cc9266cdf4c332495,b526be798596b4da3472d0701ac2133600f4b059..5706bb13352b6348e984dc4a51441eead78318a6
@@@ -22,7 -20,9 +22,8 @@@ SUBDIRS = 
        backend/utils/mb/conversion_procs \
        backend/snowball \
        include \
 -      interfaces \
        backend/replication/libpqwalreceiver \
+       fe_utils \
        bin \
        pl \
        makefiles \
index de335abf0bcfb46ffac47d7538ee30614501c156,c211a2d2e753fe7ec10237af855618d8aeb6be8d..64b7b73d162f4fa335027021ae1f79c8aafcc387
@@@ -37,12 -37,27 +37,28 @@@ all
  
  # PostgreSQL version number
  VERSION = @PACKAGE_VERSION@
 +XLVERSION = @PACKAGE_XC_VERSION@
  MAJORVERSION = @PG_MAJORVERSION@
+ VERSION_NUM = @PG_VERSION_NUM@
  
- # Support for VPATH builds
- # (PGXS VPATH support is handled separately in pgxs.mk)
- ifndef PGXS
+ # Set top_srcdir, srcdir, and VPATH.
+ ifdef PGXS
+ top_srcdir = $(top_builddir)
+ # If VPATH is set or Makefile is not in current directory we are building
+ # the extension with VPATH so we set the variable here.
+ ifdef VPATH
+ srcdir = $(VPATH)
+ else
+ ifeq ($(CURDIR),$(dir $(firstword $(MAKEFILE_LIST))))
+ srcdir = .
+ VPATH =
+ else
+ srcdir = $(dir $(firstword $(MAKEFILE_LIST)))
+ VPATH = $(srcdir)
+ endif
+ endif
+ else # not PGXS
  vpath_build = @vpath_build@
  abs_top_builddir = @abs_top_builddir@
  abs_top_srcdir = @abs_top_srcdir@
Simple merge
Simple merge
index 79766121c3229136d2ba6c465858636c5ac8cfa9,6d0f3f37673a5d3a48358149213405f7bbadde43..15a18a51cc4924f4750109890084230e1089fc0e
@@@ -45,8 -45,7 +45,8 @@@
   * and we'd like to still refer to them via C struct offsets.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index 31712da34bc5380bc9bd0abf2b920bd98d4b3fd1,d9664aa6c6b918cbf3a322f37ad77784f9bd64bd..5679a18d62ffe91017d5c5aebcc69a17dd9c3a01
@@@ -5,8 -5,7 +5,8 @@@
   *      clients and standalone backends are supported here).
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
Simple merge
index f4a06de5d6bee3c702ab554c26057a55a98d6665,24bd9be5e17cefebf33c52823ceca3a17e0c8b5a..123308270f6dd76f01e3b19ecb6285a786cfb5c6
@@@ -1471,29 -1527,48 +1534,69 @@@ heap_rescan(HeapScanDesc scan
         * reinitialize scan descriptor
         */
        initscan(scan, key, true);
+       /*
+        * reset parallel scan, if present
+        */
+       if (scan->rs_parallel != NULL)
+       {
+               ParallelHeapScanDesc parallel_scan;
+               /*
+                * Caller is responsible for making sure that all workers have
+                * finished the scan before calling this, so it really shouldn't be
+                * necessary to acquire the mutex at all.  We acquire it anyway, just
+                * to be tidy.
+                */
+               parallel_scan = scan->rs_parallel;
+               SpinLockAcquire(&parallel_scan->phs_mutex);
+               parallel_scan->phs_cblock = parallel_scan->phs_startblock;
+               SpinLockRelease(&parallel_scan->phs_mutex);
+       }
+ }
+ /* ----------------
+  *            heap_rescan_set_params  - restart a relation scan after changing params
+  *
+  * This call allows changing the buffer strategy, syncscan, and pagemode
+  * options before starting a fresh scan.  Note that although the actual use
+  * of syncscan might change (effectively, enabling or disabling reporting),
+  * the previously selected startblock will be kept.
+  * ----------------
+  */
+ void
+ heap_rescan_set_params(HeapScanDesc scan, ScanKey key,
+                                          bool allow_strat, bool allow_sync, bool allow_pagemode)
+ {
+       /* adjust parameters */
+       scan->rs_allow_strat = allow_strat;
+       scan->rs_allow_sync = allow_sync;
+       scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(scan->rs_snapshot);
+       /* ... and rescan */
+       heap_rescan(scan, key);
  }
  
 +/* ----------------
 + *            heap_rescan_set_params  - restart a relation scan after changing params
 + *
 + * This call allows changing the buffer strategy, syncscan, and pagemode
 + * options before starting a fresh scan.  Note that although the actual use
 + * of syncscan might change (effectively, enabling or disabling reporting),
 + * the previously selected startblock will be kept.
 + * ----------------
 + */
 +void
 +heap_rescan_set_params(HeapScanDesc scan, ScanKey key,
 +                                         bool allow_strat, bool allow_sync, bool allow_pagemode)
 +{
 +      /* adjust parameters */
 +      scan->rs_allow_strat = allow_strat;
 +      scan->rs_allow_sync = allow_sync;
 +      scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(scan->rs_snapshot);
 +      /* ... and rescan */
 +      heap_rescan(scan, key);
 +}
 +
  /* ----------------
   *            heap_endscan    - end relation scan
   *
index 0d605f5728b35bcafc114bd2a241b90bdb0e4d1b,6ff92516eda9e44f82f0be3ff08a90a2e5df6867..200861eef168701e091844c8fc380460f5efe377
@@@ -97,10 -104,11 +104,12 @@@ heap_page_prune_opt(Relation relation, 
                RelationIsAccessibleInLogicalDecoding(relation))
                OldestXmin = RecentGlobalXmin;
        else
-               OldestXmin = RecentGlobalDataXmin;
+               OldestXmin =
+                       TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
+                                                                                               relation);
  
 -      Assert(TransactionIdIsValid(OldestXmin));
 +      if (!TransactionIdIsValid(OldestXmin))
 +              return;
  
        /*
         * Let's see if we really need pruning.
Simple merge
index 654d3c54e64139c002f1befde61818dde3de3cac,5514db1dda6ceaf95d3ef0ef37e66bddc82af420..da5ca00d76c46be67c9d12b28118569300817eb6
@@@ -8,9 -8,9 +8,9 @@@ subdir = src/backend/access/rmgrdes
  top_builddir = ../../../..
  include $(top_builddir)/src/Makefile.global
  
- OBJS = barrierdesc.o brindesc.o clogdesc.o committsdesc.o dbasedesc.o gindesc.o gistdesc.o \
-          hashdesc.o heapdesc.o mxactdesc.o nbtdesc.o relmapdesc.o \
-          replorigindesc.o seqdesc.o smgrdesc.o spgdesc.o \
-          standbydesc.o tblspcdesc.o xactdesc.o xlogdesc.o
 -OBJS = brindesc.o clogdesc.o committsdesc.o dbasedesc.o genericdesc.o \
++OBJS = barrierdesc.o brindesc.o clogdesc.o committsdesc.o dbasedesc.o genericdesc.o \
+          gindesc.o gistdesc.o hashdesc.o heapdesc.o logicalmsgdesc.o \
+          mxactdesc.o nbtdesc.o relmapdesc.o replorigindesc.o seqdesc.o \
+          smgrdesc.o spgdesc.o standbydesc.o tblspcdesc.o xactdesc.o xlogdesc.o
  
  include $(top_srcdir)/src/backend/common.mk
index e163210a388e02d2409d8871d9705102a79900e6,242d79a136676ce1955ca9e64603da865d10e952..f36b1099a95fa7a207984ac64f97aac0aafd4321
@@@ -35,9 -35,10 +35,10 @@@ smgr_desc(StringInfo buf, XLogReaderSta
        else if (info == XLOG_SMGR_TRUNCATE)
        {
                xl_smgr_truncate *xlrec = (xl_smgr_truncate *) rec;
 -              char       *path = relpathperm(xlrec->rnode, MAIN_FORKNUM);
 +              char       *path = relpathperm_client(xlrec->rnode, MAIN_FORKNUM, "");
  
-               appendStringInfo(buf, "%s to %u blocks", path, xlrec->blkno);
+               appendStringInfo(buf, "%s to %u blocks flags %d", path,
+                                                xlrec->blkno, xlrec->flags);
                pfree(path);
        }
  }
index 630be963ee679c455d0746ade421c62fe7909dab,16fbe47269a9be93ac6a283b93da54aa4b82454e..5704a5caf0368fbec30a4c6707f319ce91f7ab08
@@@ -12,10 -12,10 +12,10 @@@ subdir = src/backend/access/transa
  top_builddir = ../../../..
  include $(top_builddir)/src/Makefile.global
  
- OBJS = clog.o commit_ts.o multixact.o parallel.o rmgr.o slru.o subtrans.o \
-       timeline.o transam.o twophase.o twophase_rmgr.o varsup.o \
+ OBJS = clog.o commit_ts.o generic_xlog.o multixact.o parallel.o rmgr.o slru.o \
+       subtrans.o timeline.o transam.o twophase.o twophase_rmgr.o varsup.o \
        xact.o xlog.o xlogarchive.o xlogfuncs.o \
 -      xloginsert.o xlogreader.o xlogutils.o
 +      xloginsert.o xlogreader.o xlogutils.o gtm.o
  
  include $(top_srcdir)/src/backend/common.mk
  
index fbb8b0fda70f30ac75d5c3c13ea778ccea26c005,263447679b8991ff35796d421bb790e6d2f5dd28..0fcccfc3a700229c650584a0d3d1757e089a64d4
   * for aborts (whether sync or async), since the post-crash assumption would
   * be that such transactions failed anyway.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/backend/access/transam/clog.c
   *
index f24dfc7d1f4f00e46072285e33aedc2ed3bb6440,31c5fd165c0df07f53012eac39374a2f3ab5e4d3..bc875677b097833aaf7fea8ea084b7ca9a74bec9
  #include "commands/dbcommands_xlog.h"
  #include "commands/sequence.h"
  #include "commands/tablespace.h"
 +#ifdef PGXC
 +#include "pgxc/barrier.h"
 +#endif
+ #include "replication/message.h"
  #include "replication/origin.h"
  #include "storage/standby.h"
  #include "utils/relmapper.h"
Simple merge
index 557c70055f4e0e352b432f4a15b6e8591de43617,908fe2d53313b2b2f316f4419dcff910a28cec97..76069546cbccfc50bd3e0686858c58a575cd60e0
@@@ -19,9 -19,8 +19,9 @@@
   * data across crashes.  During database startup, we simply force the
   * currently-active page of SUBTRANS to zeroes.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/backend/access/transam/subtrans.c
   *
Simple merge
index 67b9fb0d743a202e23165cc5cb1f2519fd3693b6,9f55adcaf5ef0e50fe1ea93e4e725c5c7b794215..b65227922bdbce2c7d15f64f0fe08a0a0fe0cd38
@@@ -3,9 -3,8 +3,9 @@@
   * twophase.c
   *            Two-phase commit support functions.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * IDENTIFICATION
   *            src/backend/access/transam/twophase.c
@@@ -133,8 -126,11 +135,11 @@@ int                      max_prepared_xacts = 0
   *
   * typedef struct GlobalTransactionData *GlobalTransaction appears in
   * twophase.h
+  *
+  * Note that the max value of GIDSIZE must fit in the uint16 gidlen,
+  * specified in TwoPhaseFileHeader.
   */
 -#define GIDSIZE 200
 +#define GIDSIZE (200 + (MAX_COORDINATORS + MAX_DATANODES) * 15)
  
  typedef struct GlobalTransactionData
  {
index a1d161d8c9f5e453837da972285431244147d424,2f7e645ace8d85c28489024af956fc194bd18bf7..a4e67d9fc393afe8fbd1c0bc9d4c2471111daafd
@@@ -3,9 -3,7 +3,9 @@@
   * varsup.c
   *      postgres OID & XID variables support routines
   *
-  * Copyright (c) 2000-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+  * Copyright (c) 2000-2016, PostgreSQL Global Development Group
   *
   * IDENTIFICATION
   *      src/backend/access/transam/varsup.c
index 3cbe81bf7f9f47c543daf83ec6159b5098c05e20,23f36ead7e54e86d0ef1e33a63aa60ade10f8b8f..049aabc20996583bd4e6b7fc57ea6b99146ab3dd
@@@ -5,10 -5,8 +5,10 @@@
   *
   * See src/backend/access/transam/README for more information.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
index 91c189efa80e4968e39776002b997039f188d9fe,f13f9c1fa5e271709a137d517533284c84253afd..19b4921075b91e063658c656c473a5d35aca77e8
  #include "catalog/pg_database.h"
  #include "commands/tablespace.h"
  #include "miscadmin.h"
 +#ifdef PGXC
 +#include "pgxc/barrier.h"
 +#endif
  #include "pgstat.h"
  #include "postmaster/bgwriter.h"
+ #include "postmaster/walwriter.h"
  #include "postmaster/startup.h"
  #include "replication/basebackup.h"
  #include "replication/logical.h"
index 1d5647190b9bf9c54dc9f12dbd3bca1c5289fdc8,51a8e8ddb2ec1fb3d42a4497ea80df3b32ba4bd8..1bdbea655bb646e452aa1fe9b66cc8b58ef4a75d
@@@ -19,8 -19,6 +19,7 @@@
  
  #include <unistd.h>
  
 +#include "miscadmin.h"
  #include "access/xlog.h"
  #include "access/xlog_internal.h"
  #include "access/xlogutils.h"
index 6707724ddff1320574dc83a4f06cca95fa35d618,e518e178bb4b43958a929aa0b998f09d92f5a1b6..86732f73d87138b7cf450662fa302454eab4fb36
@@@ -4,10 -4,8 +4,10 @@@
   *      routines to support running postgres in 'bootstrap' mode
   *    bootstrap mode is used to create the initial template database
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * IDENTIFICATION
   *      src/backend/bootstrap/bootstrap.c
@@@ -28,8 -26,8 +28,9 @@@
  #include "miscadmin.h"
  #include "nodes/makefuncs.h"
  #include "pg_getopt.h"
+ #include "pgstat.h"
  #include "postmaster/bgwriter.h"
 +#include "postmaster/clustermon.h"
  #include "postmaster/startup.h"
  #include "postmaster/walwriter.h"
  #include "replication/walreceiver.h"
index 2680d6ceffa997a2d1e97cacb4287ee7172eaceb,1ce761004979d2c5eb56021ed200fae5a863a22d..240c44d0f0d8969728a8d644e6e131adb1ccbbc6
@@@ -39,9 -39,8 +39,9 @@@ POSTGRES_BKI_SRCS = $(addprefix $(top_s
        pg_ts_config.h pg_ts_config_map.h pg_ts_dict.h \
        pg_ts_parser.h pg_ts_template.h pg_extension.h \
        pg_foreign_data_wrapper.h pg_foreign_server.h pg_user_mapping.h \
 +      pgxc_class.h pgxc_node.h pgxc_group.h \
        pg_foreign_table.h pg_policy.h pg_replication_origin.h \
-       pg_default_acl.h pg_seclabel.h pg_shseclabel.h \
+       pg_default_acl.h pg_init_privs.h pg_seclabel.h pg_shseclabel.h \
        pg_collation.h pg_range.h pg_transform.h \
        toasting.h indexing.h \
      )
index df569f56b3c444e03b54f0772fa79c529f221735,1baaa0bb8988cb348e03dcd38f64706a47e385d2..9bb937aa4c2b09faa39ebc5c982da1eede70412d
@@@ -5,8 -5,7 +5,8 @@@
   *            bits of hard-wired knowledge
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -406,12 -390,7 +407,12 @@@ GetNewRelFileNode(Oid reltablespace, Re
        switch (relpersistence)
        {
                case RELPERSISTENCE_TEMP:
-                       backend = MyBackendId;
 +#ifdef XCP
 +                      if (OidIsValid(MyCoordId))
 +                              backend = MyFirstBackendId;
 +                      else
 +#endif
+                       backend = BackendIdForTempRelations();
                        break;
                case RELPERSISTENCE_UNLOGGED:
                case RELPERSISTENCE_PERMANENT:
index ee0e6c6e270a4f6c7a7716f8c2ae7a08a191fa78,04d78402903074f68465448d58c636252e68b852..a01cdef367eae8084af8700d5eebcdb2695bbe02
@@@ -4,10 -4,8 +4,10 @@@
   *      Routines to support inter-object dependencies.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * IDENTIFICATION
   *      src/backend/catalog/dependency.c
@@@ -168,11 -161,9 +172,12 @@@ static const Oid object_classes[] = 
        UserMappingRelationId,          /* OCLASS_USER_MAPPING */
        DefaultAclRelationId,           /* OCLASS_DEFACL */
        ExtensionRelationId,            /* OCLASS_EXTENSION */
 +#ifdef PGXC
 +      PgxcClassRelationId,            /* OCLASS_PGXCCLASS */
 +#endif
        EventTriggerRelationId,         /* OCLASS_EVENT_TRIGGER */
-       PolicyRelationId                        /* OCLASS_POLICY */
+       PolicyRelationId,                       /* OCLASS_POLICY */
+       TransformRelationId                     /* OCLASS_TRANSFORM */
  };
  
  
Simple merge
index 89e2f1b42be34d7e6b196083529df8a39d41ec3e,e997b574ca9eaf93514817464b3a77467d1a5371..a1df27d43ff465dc57d935b9f3536884f192b5d9
@@@ -3,9 -3,8 +3,9 @@@
   * heap.c
   *      code to create and destroy POSTGRES heap relations
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
Simple merge
index f7fb06197d4c23df1d2188a056a1213ce8d196bc,8fd4c3136bc44726c4d848e1935be9977a30c6e9..5caaef144f388f92ff94653569979c6336cbc6d7
@@@ -9,8 -9,7 +9,8 @@@
   * and implementing search-path-controlled searches.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
index d95d2001a6cf4da37ee8ba26a685b3d7e74c4ba6,959d3845df217455815c6c0e4c4f85b06ada486f..8ada7a06daaa845de754b626ac623e849a55f4f7
@@@ -3,8 -3,7 +3,8 @@@
   * pg_aggregate.c
   *      routines to support manipulation of the pg_aggregate relation
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -57,10 -56,10 +57,13 @@@ AggregateCreate(const char *aggName
                                List *parameterDefaults,
                                Oid variadicArgType,
                                List *aggtransfnName,
 +#ifdef PGXC
 +                              List *aggcollectfnName,
 +#endif
                                List *aggfinalfnName,
+                               List *aggcombinefnName,
+                               List *aggserialfnName,
+                               List *aggdeserialfnName,
                                List *aggmtransfnName,
                                List *aggminvtransfnName,
                                List *aggmfinalfnName,
                                Oid aggmTransType,
                                int32 aggmTransSpace,
                                const char *agginitval,
-                               const char *aggminitval)
 +#ifdef PGXC
 +                              const char *agginitcollect,
 +#endif
+                               const char *aggminitval,
+                               char proparallel)
  {
        Relation        aggdesc;
        HeapTuple       tup;
        Datum           values[Natts_pg_aggregate];
        Form_pg_proc proc;
        Oid                     transfn;
 +#ifdef PGXC
 +      Oid                     collectfn = InvalidOid; /* can be omitted */
 +#endif
        Oid                     finalfn = InvalidOid;   /* can be omitted */
+       Oid                     combinefn = InvalidOid; /* can be omitted */
+       Oid                     serialfn = InvalidOid;  /* can be omitted */
+       Oid                     deserialfn = InvalidOid;                /* can be omitted */
        Oid                     mtransfn = InvalidOid;  /* can be omitted */
        Oid                     minvtransfn = InvalidOid;               /* can be omitted */
        Oid                     mfinalfn = InvalidOid;  /* can be omitted */
index 113793c224204e003feb3eba03cafabf3265ff93,c1d1505e648de84b76ae65aacef578e3bdfadde1..75621bd6e39d8e7f2ec6ec650f20b5f7c7787387
@@@ -3,8 -3,7 +3,8 @@@
   * pg_proc.c
   *      routines to support manipulation of the pg_proc relation
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index 2b59d90141fd5b52022932408d95e44da53d04e9,0d8311c40381811a38cd28b011f802e8f745129d..ea52d55f8ddbbd67b94e54d3570ba263926de45c
@@@ -3,8 -3,7 +3,8 @@@
   * storage.c
   *      code to create and destroy physical storage for relations
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -86,12 -85,7 +86,16 @@@ RelationCreateStorage(RelFileNode rnode
        switch (relpersistence)
        {
                case RELPERSISTENCE_TEMP:
++<<<<<<< HEAD
 +#ifdef XCP
 +                      if (OidIsValid(MyCoordId))
 +                              backend = MyFirstBackendId;
 +                      else
 +#endif
 +                      backend = MyBackendId;
++=======
+                       backend = BackendIdForTempRelations();
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
                        needs_wal = false;
                        break;
                case RELPERSISTENCE_UNLOGGED:
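The upstream side of this hunk assigns temp-relation storage to BackendIdForTempRelations() rather than MyBackendId directly. Assuming the 9.6-era definition in src/include/storage/backendid.h, that macro resolves to the parallel master's backend id inside a parallel worker and to MyBackendId otherwise:

    /* Approximate 9.6-era definition (assumption), storage/backendid.h */
    #define BackendIdForTempRelations() \
        (ParallelMasterBackendId == InvalidBackendId ? \
         MyBackendId : ParallelMasterBackendId)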
index e1f85ddb308b7bd1cdf82415708e73318f6daaa3,d34c82c5baf7134d9f991dc64ff2bd838fc7eb92..8f3037f1c9b6684496b2d39731972b69a8e8b751
@@@ -4,8 -4,7 +4,8 @@@
   *
   *      Routines for aggregate-manipulation commands
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -178,14 -174,10 +189,16 @@@ DefineAggregate(List *name, List *args
                        initval = defGetString(defel);
                else if (pg_strcasecmp(defel->defname, "initcond1") == 0)
                        initval = defGetString(defel);
 +#ifdef PGXC
 +              else if (pg_strcasecmp(defel->defname, "cfunc") == 0)
 +                      collectfuncName = defGetQualifiedName(defel);
 +              else if (pg_strcasecmp(defel->defname, "initcollect") == 0)
 +                      initcollect = defGetString(defel);
 +#endif
                else if (pg_strcasecmp(defel->defname, "minitcond") == 0)
                        minitval = defGetString(defel);
+               else if (pg_strcasecmp(defel->defname, "parallel") == 0)
+                       parallel = defGetString(defel);
                else
                        ereport(WARNING,
                                        (errcode(ERRCODE_SYNTAX_ERROR),
                                                        format_type_be(transTypeId))));
        }
  
 +#ifdef XCP
 +      /*
 +       * Look up the aggregate's collecttype.
 +       *
 +       * All limitations that apply to the transtype also apply to the collecttype.
 +       */
 +      if (collectType)
 +      {
 +              collectTypeId = typenameTypeId(NULL, collectType);
 +              if (get_typtype(collectTypeId) == TYPTYPE_PSEUDO &&
 +                      !IsPolymorphicType(collectTypeId))
 +              {
 +                      if (collectTypeId == INTERNALOID && superuser())
 +                               /* okay */ ;
 +                      else
 +                              ereport(ERROR,
 +                                              (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
 +                                               errmsg("aggregate collection data type cannot be %s",
 +                                                              format_type_be(collectTypeId))));
 +              }
 +      }
 +      else
 +              collectTypeId = InvalidOid;
 +#endif
+       if (serialfuncName && deserialfuncName)
+       {
+               /*
+                * Serialization is only needed/allowed for transtype INTERNAL.
+                */
+               if (transTypeId != INTERNALOID)
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+                                        errmsg("serialization functions may be specified only when the aggregate transition data type is %s",
+                                                       format_type_be(INTERNALOID))));
+       }
+       else if (serialfuncName || deserialfuncName)
+       {
+               /*
+                * Cannot specify one function without the other.
+                */
+               ereport(ERROR,
+                               (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+                                errmsg("must specify both or neither of serialization and deserialization functions")));
+       }
  
        /*
         * If a moving-aggregate transtype is specified, look that up.  Same
                                                   parameterDefaults,
                                                   variadicArgType,
                                                   transfuncName,               /* step function name */
 +#ifdef PGXC
 +                                                 collectfuncName,     /* collect function name */
 +#endif
                                                   finalfuncName,               /* final function name */
+                                                  combinefuncName,             /* combine function name */
+                                                  serialfuncName,              /* serial function name */
+                                                  deserialfuncName,    /* deserial function name */
                                                   mtransfuncName,              /* fwd trans function name */
                                                   minvtransfuncName,   /* inv trans function name */
                                                   mfinalfuncName,              /* final function name */
                                                   mtransTypeId,                /* transition data type */
                                                   mtransSpace, /* transition space */
                                                   initval,             /* initial condition */
-                                                  minitval);   /* initial condition */
 +#ifdef PGXC
 +                                                 initcollect, /* initial condition for collection function */
 +#endif
+                                                  minitval,    /* initial condition */
+                                                  proparallel);                /* parallel safe? */
  }
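The new checks above only accept serialization support when the transition type is internal, and insist that serialfunc and deserialfunc come as a pair. A minimal sketch of such a pair, assuming a hypothetical two-field INTERNAL state; the struct and function names are illustrative only, not part of this commit:

    #include "postgres.h"
    #include "fmgr.h"
    #include "libpq/pqformat.h"

    /* Hypothetical partial state carried in an INTERNAL transvalue. */
    typedef struct MyAvgState
    {
        int64   count;
        int64   sum;
    } MyAvgState;

    /* serialfunc: flatten the INTERNAL state into a bytea */
    Datum
    my_avg_serialize(PG_FUNCTION_ARGS)
    {
        MyAvgState *state = (MyAvgState *) PG_GETARG_POINTER(0);
        StringInfoData buf;

        pq_begintypsend(&buf);
        pq_sendint64(&buf, state->count);
        pq_sendint64(&buf, state->sum);
        PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
    }

    /* deserialfunc: rebuild the INTERNAL state from the bytea */
    Datum
    my_avg_deserialize(PG_FUNCTION_ARGS)
    {
        bytea      *sstate = PG_GETARG_BYTEA_PP(0);
        MyAvgState *state = (MyAvgState *) palloc(sizeof(MyAvgState));
        StringInfoData buf;

        initStringInfo(&buf);
        appendBinaryStringInfo(&buf, VARDATA_ANY(sstate),
                               VARSIZE_ANY_EXHDR(sstate));
        state->count = pq_getmsgint64(&buf);
        state->sum = pq_getmsgint64(&buf);
        pfree(buf.data);

        PG_RETURN_POINTER(state);
    }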
index a22ea312ec8a2dc0df446a8e585e1bfaab6ca63d,9ac71220a2ac7f7d1fad73e0e792de729f2097d7..14aad4fd7c08274cf573e7a797adb77342dec0ef
@@@ -3,8 -3,7 +3,8 @@@
   * analyze.c
   *      the Postgres statistics generator
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
Simple merge
index d4d0f376c0deba3314484e25db3c96c2adeb34f1,a0d3f8d01dd0a5df7034983f6fce5b040f9905dd..f45da2d9144bf375930e936dd117cce1f77ddb0c
@@@ -4,8 -4,7 +4,8 @@@
   *
   * PostgreSQL object comments utility code.
   *
-  * Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+  * Copyright (c) 1996-2016, PostgreSQL Global Development Group
   *
   * IDENTIFICATION
   *      src/backend/commands/comment.c
index 783c9f4c42e32465cb94310740af1ff17cdb8816,f45b3304ae9a459f3b0461ec418ec999247cb0f3..8ac52da4bbd898facebc0a763960b53e6dc4cd02
@@@ -3,8 -3,7 +3,8 @@@
   * copy.c
   *            Implements the COPY utility command
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  #include "access/sysattr.h"
  #include "access/xact.h"
  #include "access/xlog.h"
- #include "catalog/namespace.h"
  #include "catalog/pg_type.h"
 +#ifdef XCP
 +#include "catalog/dependency.h"
 +#include "commands/sequence.h"
 +#endif
  #include "commands/copy.h"
  #include "commands/defrem.h"
  #include "commands/trigger.h"
  #include "miscadmin.h"
  #include "optimizer/clauses.h"
  #include "optimizer/planner.h"
- #include "parser/parse_relation.h"
 +#ifdef PGXC
 +#include "pgxc/pgxc.h"
 +#include "pgxc/execRemote.h"
 +#include "pgxc/locator.h"
 +#include "pgxc/remotecopy.h"
 +#include "nodes/nodes.h"
 +#include "pgxc/poolmgr.h"
 +#include "catalog/pgxc_node.h"
 +#endif
  #include "nodes/makefuncs.h"
 +#include "optimizer/pgxcship.h"
  #include "rewrite/rewriteHandler.h"
  #include "storage/fd.h"
  #include "tcop/tcopprot.h"
@@@ -224,13 -199,9 +221,13 @@@ typedef struct CopyStateDat
        char       *raw_buf;
        int                     raw_buf_index;  /* next byte to process */
        int                     raw_buf_len;    /* total # of bytes stored */
 +#ifdef PGXC
 +      /* Remote COPY state data */
 +      RemoteCopyData *remoteCopyState;
 +#endif
  } CopyStateData;
  
- /* DestReceiver for COPY (SELECT) TO */
+ /* DestReceiver for COPY (query) TO */
  typedef struct
  {
        DestReceiver pub;                       /* publicly-known function pointers */
Simple merge
index 44d03f8fab9c4f8b40b05a6f7b83eac1de45767d,50c89b827b2dc5f7d3f2be6042833dcaa0f3d06b..0b58639229e9fde8501e9a729a2103b55abc3409
@@@ -1166,22 -1168,9 +1168,14 @@@ EventTriggerSupportsObjectClass(ObjectC
                case OCLASS_USER_MAPPING:
                case OCLASS_DEFACL:
                case OCLASS_EXTENSION:
 +#ifdef PGXC
 +              case OCLASS_PGXC_CLASS:
 +              case OCLASS_PGXC_NODE:
 +              case OCLASS_PGXC_GROUP:
 +#endif
                case OCLASS_POLICY:
+               case OCLASS_AM:
                        return true;
-               case MAX_OCLASS:
-                       /*
-                        * This shouldn't ever happen, but we keep the case to avoid a
-                        * compiler warning without a "default" clause in the switch.
-                        */
-                       Assert(false);
-                       break;
        }
  
        return true;
index d2d512b99b01d1e1386a33bb9d48890f9edb1fed,82ba58ef713947b3e595279b0fa004fb4578bdef..c91960ac3123372ca40beff6b86972bbb698e57f
@@@ -3,8 -3,7 +3,8 @@@
   * explain.c
   *      Explain query execution plans
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994-5, Regents of the University of California
   *
   * IDENTIFICATION
@@@ -991,19 -909,31 +930,41 @@@ ExplainNode(PlanState *planstate, List 
                case T_WorkTableScan:
                        pname = sname = "WorkTable Scan";
                        break;
 +#ifdef PGXC
 +              case T_RemoteQuery:
 +                      pname = "Remote Fast Query Execution";
 +                      break;
 +#endif
                case T_ForeignScan:
-                       pname = sname = "Foreign Scan";
+                       sname = "Foreign Scan";
+                       switch (((ForeignScan *) plan)->operation)
+                       {
+                               case CMD_SELECT:
+                                       pname = "Foreign Scan";
+                                       operation = "Select";
+                                       break;
+                               case CMD_INSERT:
+                                       pname = "Foreign Insert";
+                                       operation = "Insert";
+                                       break;
+                               case CMD_UPDATE:
+                                       pname = "Foreign Update";
+                                       operation = "Update";
+                                       break;
+                               case CMD_DELETE:
+                                       pname = "Foreign Delete";
+                                       operation = "Delete";
+                                       break;
+                               default:
+                                       pname = "???";
+                                       break;
+                       }
                        break;
 +#ifdef XCP
 +              case T_RemoteSubplan:
 +                      pname = sname = "Remote Subquery Scan";
 +                      break;
 +#endif /* XCP */
                case T_CustomScan:
                        sname = "Custom Scan";
                        custom_name = ((CustomScan *) plan)->methods->CustomName;
                        pname = sname = "Group";
                        break;
                case T_Agg:
-                       sname = "Aggregate";
-                       switch (((Agg *) plan)->aggstrategy)
                        {
-                               case AGG_PLAIN:
-                                       pname = "Aggregate";
-                                       strategy = "Plain";
-                                       break;
-                               case AGG_SORTED:
-                                       pname = "GroupAggregate";
-                                       strategy = "Sorted";
-                                       break;
-                               case AGG_HASHED:
-                                       pname = "HashAggregate";
-                                       strategy = "Hashed";
-                                       break;
-                               default:
-                                       pname = "Aggregate ???";
-                                       strategy = "???";
-                                       break;
+                               Agg                *agg = (Agg *) plan;
+                               sname = "Aggregate";
+                               switch (agg->aggstrategy)
+                               {
+                                       case AGG_PLAIN:
+                                               pname = "Aggregate";
+                                               strategy = "Plain";
+                                               break;
+                                       case AGG_SORTED:
+                                               pname = "GroupAggregate";
+                                               strategy = "Sorted";
+                                               break;
+                                       case AGG_HASHED:
+                                               pname = "HashAggregate";
+                                               strategy = "Hashed";
+                                               break;
+                                       default:
+                                               pname = "Aggregate ???";
+                                               strategy = "???";
+                                               break;
+                               }
+                               if (DO_AGGSPLIT_SKIPFINAL(agg->aggsplit))
+                               {
+                                       partialmode = "Partial";
+                                       pname = psprintf("%s %s", partialmode, pname);
+                               }
+                               else if (DO_AGGSPLIT_COMBINE(agg->aggsplit))
+                               {
+                                       partialmode = "Finalize";
+                                       pname = psprintf("%s %s", partialmode, pname);
+                               }
+                               else
+                                       partialmode = "Simple";
                        }
 +#ifdef XCP
 +                      switch (((Agg *) plan)->aggstrategy)
 +                      {
 +                              case AGG_SLAVE:
 +                                      operation = "Transition";
 +                                      break;
 +                              case AGG_MASTER:
 +                                      operation = "Collection";
 +                                      break;
 +                              default:
 +                                      operation = NULL;
 +                                      break;
 +                      }
 +#endif
 +
                        break;
                case T_WindowAgg:
                        pname = sname = "WindowAgg";
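The Agg case above now prefixes the node name with the aggregate's split mode. As a sketch, a hypothetical helper (the DO_AGGSPLIT_* macros are the same ones used in the hunk) would map the modes like this:

    #include "postgres.h"
    #include "nodes/nodes.h"

    /* Hypothetical helper mirroring the labelling logic above. */
    static const char *
    agg_partial_mode_label(AggSplit aggsplit)
    {
        if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
            return "Partial";   /* emits partial states; finalfn is skipped */
        if (DO_AGGSPLIT_COMBINE(aggsplit))
            return "Finalize";  /* combines partial states, then finalizes */
        return "Simple";        /* ordinary single-stage aggregation */
    }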
Simple merge
Simple merge
index fad3ee26229405b5a398bfa7036fd36905b4fc61,d14d540b26e0d706126da5575295aa903dcfc26c..1587fb6e80522e6d0016551de4ee75d4ccd218ed
@@@ -3,10 -3,8 +3,10 @@@
   * indexcmds.c
   *      POSTGRES define and remove index code.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
Simple merge
index 4d0d8f410f31f21a80d39f3b1f7714e4e8d37796,e52830f7ec2ceebf4f57953f69bda7d3bbddf57f..bbd5ca54dcff6a2da92a5d95274176f3e126c6c9
@@@ -9,8 -9,7 +9,8 @@@
   * storage management for portals (but doesn't run any queries in them).
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
Simple merge
index b998db2c326e9e0ec2ee2d395d7edccb672fa26c,a60ceb8eba7cb710a5503d2c51259b4b2942298e..255ca89199d671c3cba92388de30f5f140a803ec
@@@ -3,8 -3,7 +3,8 @@@
   * schemacmds.c
   *      schema creation/manipulation commands
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index 6a90fae9d1e05bd2851081937ce88d2cb6332f8b,c98f9811119e8df366aa7e20e8fff2f88c52e31b..96d0e6d4754ae8d2d5a199b47089fb90efc1ed32
@@@ -3,10 -3,8 +3,10 @@@
   * sequence.c
   *      PostgreSQL sequences support code.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
index d63c60c94657446c749a2305b453356015b0994b,86e98148c1667e1b5cf04146e4945f1f5b5c8b42..2daeb6403e7f1706fcc3a3eccb83065210d16600
@@@ -3,10 -3,8 +3,10 @@@
   * tablecmds.c
   *      Commands for creating and altering table structures and settings
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
@@@ -430,18 -421,9 +437,19 @@@ static ObjectAddress ATExecAddOf(Relati
  static void ATExecDropOf(Relation rel, LOCKMODE lockmode);
  static void ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode);
  static void ATExecGenericOptions(Relation rel, List *options);
 +#ifdef PGXC
 +static void AtExecDistributeBy(Relation rel, DistributeBy *options);
 +static void AtExecSubCluster(Relation rel, PGXCSubCluster *options);
 +static void AtExecAddNode(Relation rel, List *options);
 +static void AtExecDeleteNode(Relation rel, List *options);
 +static void ATCheckCmd(Relation rel, AlterTableCmd *cmd);
 +static RedistribState *BuildRedistribCommands(Oid relid, List *subCmds);
 +static Oid *delete_node_list(Oid *old_oids, int old_num, Oid *del_oids, int del_num, int *new_num);
 +static Oid *add_node_list(Oid *old_oids, int old_num, Oid *add_oids, int add_num, int *new_num);
 +#endif
  static void ATExecEnableRowSecurity(Relation rel);
  static void ATExecDisableRowSecurity(Relation rel);
+ static void ATExecForceNoForceRowSecurity(Relation rel, bool force_rls);
  
  static void copy_relation_data(SMgrRelation rel, SMgrRelation dst,
                                   ForkNumber forkNum, char relpersistence);
Simple merge
index 08d92a20d9c319759958f5e1cae7cba825330ad2,99a659a10270a4b448c91d6979215d33ca9c396d..3dec3365567ed4d46543e17ee0dab56f9e2fbbf9
@@@ -3,8 -3,7 +3,8 @@@
   * trigger.c
   *      PostgreSQL TRIGGERs support code.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
index ed2f22c63a735747cb75feae320192b4fa4dcff8,0563e6347430d43c6cd5d4be5c8456fcc3406946..4181dfd167cb2fed45a88eb48a221ef461f68471
@@@ -9,10 -9,8 +9,10 @@@
   * in cluster.c.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
index 5d2a8bdca702618303958f95bad2103ca5f3c986,defafa54b2968169abf28df85b33dcd72edf6259..aafa7485957f788cf4555061a8799067e75fedfb
@@@ -4,8 -4,7 +4,8 @@@
   *            Routines for handling specialized SET variables.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index 32bdfecb9ac471f12052f5e50b83bfb08d8bf7d1,085bf3232054dd240bd1a124eaf7cf649ff09a1f..a809a203e9d0540aba18f8c4d55830d07187230b
@@@ -3,8 -3,7 +3,8 @@@
   * view.c
   *      use rewrite rules to construct views
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index 1154371b9f3100c964054a2adde711176f2d5f67,51edd4c5e709590d75fd9459f43b13d7eca2bad2..6625d56b9715b5de4445edd0792bcc2b14f27a04
@@@ -24,6 -25,6 +25,6 @@@ OBJS = execAmi.o execCurrent.o execGrou
         nodeSamplescan.o nodeSeqscan.o nodeSetOp.o nodeSort.o nodeUnique.o \
         nodeValuesscan.o nodeCtescan.o nodeWorktablescan.o \
         nodeGroup.o nodeSubplan.o nodeSubqueryscan.o nodeTidscan.o \
-        nodeForeignscan.o nodeWindowAgg.o producerReceiver.o tstoreReceiver.o spi.o
 -       nodeForeignscan.o nodeWindowAgg.o tstoreReceiver.o tqueue.o spi.o
++       nodeForeignscan.o nodeWindowAgg.o producerReceiver.o tstoreReceiver.o tqueue.o spi.o
  
  include $(top_srcdir)/src/backend/common.mk
index 79555f5e31f5b98e57b770716180447cc2e123a9,2587ef704626e10b0b744c645cc3a40d89b2c23d..2cb83d75a7e71095dd9c4e8b4c43395ed9f8ed1c
@@@ -3,8 -3,7 +3,8 @@@
   * execAmi.c
   *      miscellaneous executor access method routines
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *    src/backend/executor/execAmi.c
index d1cd93c150783a769e30f41896119fca8bb31fed,2e4e485a0642d446461d67201d050427ae0d6b00..757ea8dddc9183eb7c27ce613466720ccb7a53d1
@@@ -3,8 -3,7 +3,8 @@@
   * execCurrent.c
   *      executor support for WHERE CURRENT OF cursor
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *    src/backend/executor/execCurrent.c
index 872da14acd216ca56587911d2cab87eda66d2a52,32bb3f9205491e5467cc2cc297fcb9387ac48a9c..2eaa33455c9c5952e54f499ac14d8d62b45d5347
@@@ -26,8 -26,7 +26,8 @@@
   *    before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
   *    which should also omit ExecutorRun.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index 581b5441b4c5575b5a82376bb1daa5afdb73e44f,554244ff71f89e49b7543d8a7849b9b7ed069d82..fa7bdfc92363e6ea4f644ec7ef9a0909e4b71c85
@@@ -7,8 -7,7 +7,8 @@@
   *     ExecProcNode, or ExecEndNode on its subnodes and do the appropriate
   *     processing.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  #include "executor/nodeValuesscan.h"
  #include "executor/nodeWindowAgg.h"
  #include "executor/nodeWorktablescan.h"
+ #include "nodes/nodeFuncs.h"
  #include "miscadmin.h"
 -
 +#ifdef PGXC
 +#include "pgxc/execRemote.h"
 +#endif
  
  /* ------------------------------------------------------------------------
   *            ExecInitNode
Simple merge
index 4d512775604a248a44bad7af1017fc422abc29f0,533050dc8593284b7d05ef8942420318c86d67be..63375dc82583ed199de698dbd91bfa0e50908d65
@@@ -12,8 -12,7 +12,8 @@@
   *      This information is needed by routines manipulating tuples
   *      (getattribute, formtuple, etc.).
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  #include "nodes/nodeFuncs.h"
  #include "storage/bufmgr.h"
  #include "utils/builtins.h"
- #include "utils/expandeddatum.h"
  #include "utils/lsyscache.h"
  #include "utils/typcache.h"
 -
 +#ifdef XCP
 +#include "pgxc/pgxc.h"
 +#include "utils/memutils.h"
 +#endif
  
  static TupleDesc ExecTypeFromTLInternal(List *targetList,
                                           bool hasoid, bool skipjunk);
index 7105fa8b82cb791a4765162a4299ad425fe9bf2b,e937cf8e7e2c4b1f6f131389ecb069e343f739be..4bdf76cf5620b6630ef2e0123979ef7d87fa721c
@@@ -3,8 -3,7 +3,8 @@@
   * execUtils.c
   *      miscellaneous executor utility routines
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
Simple merge
index 6b5dc56a0af9551792a5da0fa1032c5d8f4cda76,1ec2515090fb2a4b723cbbd5c73c68426f5e4dd9..d11d6fa144a0ce2d7a68f99f2c49f71b8907e0cc
   *
   *      TODO: AGG_HASHED doesn't support multiple grouping sets yet.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
@@@ -188,32 -217,35 +219,43 @@@ typedef struct AggStatePerTransDat
         */
        int                     numTransInputs;
  
-       /*
-        * Number of arguments to pass to the finalfn.  This is always at least 1
-        * (the transition state value) plus any ordered-set direct args. If the
-        * finalfn wants extra args then we pass nulls corresponding to the
-        * aggregated input columns.
-        */
-       int                     numFinalArgs;
-       /* Oids of transfer functions */
+       /* Oid of the state transition or combine function */
        Oid                     transfn_oid;
 +      Oid                     finalfn_oid;    /* may be InvalidOid */
 +#ifdef PGXC
 +      Oid                     collectfn_oid;  /* may be InvalidOid */
 +#endif /* PGXC */
  
+       /* Oid of the serialization function or InvalidOid */
+       Oid                     serialfn_oid;
+       /* Oid of the deserialization function or InvalidOid */
+       Oid                     deserialfn_oid;
+       /* Oid of state value's datatype */
+       Oid                     aggtranstype;
+       /* ExprStates of the FILTER and argument expressions. */
+       ExprState  *aggfilter;          /* state of FILTER expression, if any */
+       List       *args;                       /* states of aggregated-argument expressions */
+       List       *aggdirectargs;      /* states of direct-argument expressions */
        /*
-        * fmgr lookup data for transfer functions --- only valid when
-        * corresponding oid is not InvalidOid.  Note in particular that fn_strict
-        * flags are kept here.
+        * fmgr lookup data for transition function or combine function.  Note in
+        * particular that the fn_strict flag is kept here.
         */
        FmgrInfo        transfn;
 +      FmgrInfo        finalfn;
 +#ifdef PGXC
 +      FmgrInfo        collectfn;
 +#endif /* PGXC */
  
+       /* fmgr lookup data for serialization function */
+       FmgrInfo        serialfn;
+       /* fmgr lookup data for deserialization function */
+       FmgrInfo        deserialfn;
        /* Input collation derived for aggregate */
        Oid                     aggCollation;
  
         */
        Datum           initValue;
        bool            initValueIsNull;
 +#ifdef PGXC
 +      Datum           initCollectValue;
 +      bool            initCollectValueIsNull;
 +#endif /* PGXC */
  
        /*
-        * We need the len and byval info for the agg's input, result, and
-        * transition data types in order to know how to copy/delete values.
+        * We need the len and byval info for the agg's input and transition data
+        * types in order to know how to copy/delete values.
         *
         * Note that the info for the input type is used only when handling
         * DISTINCT aggs with just one argument, so there is only one input type.
@@@ -594,43 -676,7 +709,44 @@@ initialize_aggregate(AggState *aggstate
         * aggregates like max() and min().) The noTransValue flag signals that we
         * still need to do this.
         */
 +      pergroupstate->noTransValue = peraggstate->initValueIsNull;
 +
 +#ifdef PGXC
 +      /*
 +       * (Re)set collectValue to the initial value.
 +       *
 +       * Note that when the initial value is pass-by-ref, we must copy it
 +       * (into the aggcontext) since we will pfree the collectValue later.
 +       * The collection type is the same as the transition type.
 +       */
 +      if (OidIsValid(peraggstate->collectfn_oid))
 +      {
 +              if (peraggstate->initCollectValueIsNull)
 +                      pergroupstate->collectValue = peraggstate->initCollectValue;
 +              else
 +              {
 +                      MemoryContext oldContext;
 +
 +                      oldContext = MemoryContextSwitchTo(
 +                                      aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
 +                      pergroupstate->collectValue = datumCopy(peraggstate->initCollectValue,
 +                                      peraggstate->transtypeByVal,
 +                                      peraggstate->transtypeLen);
 +                      MemoryContextSwitchTo(oldContext);
 +              }
 +              pergroupstate->collectValueIsNull = peraggstate->initCollectValueIsNull;
 +
 +              /*
 +               * If the initial value for the collection state doesn't exist in the
 +               * pg_aggregate table then we will let the first non-NULL value
 +               * returned from the outer procNode become the initial value. (This is
 +               * useful for aggregates like max() and min().) The noCollectValue flag
 +               * signals that we still need to do this.
 +               */
 +              pergroupstate->noCollectValue = peraggstate->initCollectValueIsNull;
 +      }
 +#endif /* PGXC */
+       pergroupstate->noTransValue = pertrans->initValueIsNull;
  }
  
  /*
@@@ -1084,58 -1306,11 +1376,59 @@@ finalize_aggregate(AggState *aggstate
        FunctionCallInfoData fcinfo;
        bool            anynull = false;
        MemoryContext oldContext;
 +#ifdef XCP
 +      Datum value;
 +      bool  isnull;
 +#endif
        int                     i;
        ListCell   *lc;
+       AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
  
        oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
 +#ifdef XCP
 +      if (OidIsValid(peraggstate->collectfn_oid))
 +      {
 +              FunctionCallInfoData fcinfo;
 +              InitFunctionCallInfoData(fcinfo, &(peraggstate->collectfn), 2,
 +                                                                      peraggstate->aggCollation,
 +                                                                      (void *) aggstate, NULL);
 +              fcinfo.arg[1] = pergroupstate->transValue;
 +              fcinfo.argnull[1] = pergroupstate->transValueIsNull;
 +              if (fcinfo.flinfo->fn_strict &&
 +                              (peraggstate->initCollectValueIsNull || pergroupstate->transValueIsNull))
 +              {
 +                      /*
 +                       * We have already checked that the collection and transition
 +                       * types are binary compatible, so we can just copy the value.
 +                       */
 +                      value = pergroupstate->transValue;
 +                      isnull = pergroupstate->transValueIsNull;
 +              }
 +              else
 +              {
 +                      /*
 +                       * copy the initial datum since it might get changed inside the
 +                       * collection function
 +                       */
 +                      fcinfo.argnull[0] = peraggstate->initCollectValueIsNull;
 +                      fcinfo.arg[0] = (Datum) NULL;
 +                      if (!fcinfo.argnull[0])
 +                      {
 +                              fcinfo.arg[0] = datumCopy(peraggstate->initCollectValue,
 +                                                              peraggstate->collecttypeByVal,
 +                                                              peraggstate->collecttypeLen);
 +                      }
 +                      value = FunctionCallInvoke(&fcinfo);
 +                      isnull = fcinfo.isnull;
 +              }
 +      }
 +      else
 +      {
 +              /* No collect function, just use transition values to finalize */
 +              value = pergroupstate->transValue;
 +              isnull = pergroupstate->transValueIsNull;
 +      }
 +#endif /* XCP */
  
        /*
         * Evaluate any direct arguments.  We do this even if there's no finalfn
        /*
         * Apply the agg's finalfn if one is provided, else return transValue.
         */
-       if (OidIsValid(peraggstate->finalfn_oid))
+       if (OidIsValid(peragg->finalfn_oid))
        {
-               int                     numFinalArgs = peraggstate->numFinalArgs;
+               int                     numFinalArgs = peragg->numFinalArgs;
  
-               /* set up aggstate->curperagg for AggGetAggref() */
-               aggstate->curperagg = peraggstate;
+               /* set up aggstate->curpertrans for AggGetAggref() */
+               aggstate->curpertrans = pertrans;
  
-               InitFunctionCallInfoData(fcinfo, &peraggstate->finalfn,
+               InitFunctionCallInfoData(fcinfo, &peragg->finalfn,
                                                                 numFinalArgs,
-                                                                peraggstate->aggCollation,
+                                                                pertrans->aggCollation,
                                                                 (void *) aggstate, NULL);
 -
 -              /* Fill in the transition state value */
 +#ifdef XCP
 +              fcinfo.arg[0] = value;
 +              fcinfo.argnull[0] = isnull;
 +#else
                fcinfo.arg[0] = pergroupstate->transValue;
                fcinfo.argnull[0] = pergroupstate->transValueIsNull;
 +#endif /* XCP */
 +
                anynull |= pergroupstate->transValueIsNull;
  
                /* Fill any remaining argument positions with nulls */
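In the XCP branch above, finalize_aggregate() first runs the collection function, when one is defined, to fold a transition value into the collected value before calling the finalfn. A minimal sketch of what such a collectfn might look like for a count()-style aggregate (the function name is hypothetical); it plays much the same role as the upstream combinefn:

    #include "postgres.h"
    #include "fmgr.h"

    /*
     * Hypothetical collection function: argument 0 is the coordinator's
     * running collection value, argument 1 is one datanode's transition
     * value.
     */
    Datum
    int8_count_collect(PG_FUNCTION_ARGS)
    {
        int64   collected = PG_ARGISNULL(0) ? 0 : PG_GETARG_INT64(0);
        int64   transvalue = PG_ARGISNULL(1) ? 0 : PG_GETARG_INT64(1);

        PG_RETURN_INT64(collected + transvalue);
    }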
@@@ -2370,28 -2641,18 +2769,26 @@@ ExecInitAgg(Agg *node, EState *estate, 
                Oid                     inputTypes[FUNC_MAX_ARGS];
                int                     numArguments;
                int                     numDirectArgs;
-               int                     numInputs;
-               int                     numSortCols;
-               int                     numDistinctCols;
-               List       *sortlist;
                HeapTuple       aggTuple;
                Form_pg_aggregate aggform;
 +              Oid                     aggtranstype;
 +#ifdef XCP
 +              Oid                     aggcollecttype;
 +#endif /* XCP */
                AclResult       aclresult;
                Oid                     transfn_oid,
                                        finalfn_oid;
 -              Expr       *finalfnexpr;
 -              Oid                     aggtranstype;
 +#ifdef PGXC
 +              Oid                     collectfn_oid;
 +              Expr       *collectfnexpr;
 +#endif /* PGXC */
 +              Expr       *transfnexpr,
 +                                 *finalfnexpr;
+               Oid                     serialfn_oid,
+                                       deserialfn_oid;
                Datum           textInitVal;
-               int                     i;
-               ListCell   *lc;
+               Datum           initValue;
+               bool            initValueIsNull;
  
                /* Planner should have assigned aggregate to correct level */
                Assert(aggref->agglevelsup == 0);
                                                   get_func_name(aggref->aggfnoid));
                InvokeFunctionExecuteHook(aggref->aggfnoid);
  
 +              peraggstate->transfn_oid = transfn_oid = aggform->aggtransfn;
 +              peraggstate->finalfn_oid = finalfn_oid = aggform->aggfinalfn;
 +#ifdef PGXC
 +              peraggstate->collectfn_oid = collectfn_oid = aggform->aggcollectfn;
 +              /*
 +               * If preparing PHASE1 skip finalization step and return transmission
 +               * If preparing PHASE1, skip the finalization step and return the
 +               * transition value, to be collected and finalized on the master node.
 +               * If preparing PHASE2, move the collection function into the transition
 +               * slot, so the master node collects transition values and finalizes them.
 +               * Otherwise (one-node aggregation) do all steps locally; the collection
 +               * function just converts the transient value for the finalization function.
 +              if (node->aggdistribution == AGG_SLAVE)
 +              {
 +                      peraggstate->collectfn_oid = collectfn_oid = InvalidOid;
 +                      peraggstate->finalfn_oid = finalfn_oid = InvalidOid;
 +              }
 +              else if (node->aggdistribution == AGG_MASTER)
 +              {
 +                      peraggstate->transfn_oid = transfn_oid = collectfn_oid;
 +                      peraggstate->collectfn_oid = collectfn_oid = InvalidOid;
 +                      
 +                      /*
 +                       * Tuples should only be filtered on the datanodes when the
 +                       * coordinator is doing collection and finalization.
 +                       */
 +                      aggref->aggfilter = NULL;
 +                      aggrefstate->aggfilter = NULL;
 +              }
 +#endif /* PGXC */
+               /* planner recorded transition state type in the Aggref itself */
+               aggtranstype = aggref->aggtranstype;
+               Assert(OidIsValid(aggtranstype));
+               /*
+                * If this aggregation is performing state combines, then instead of
+                * using the transition function, we'll use the combine function
+                */
+               if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
+               {
+                       transfn_oid = aggform->aggcombinefn;
+                       /* If not set then the planner messed up */
+                       if (!OidIsValid(transfn_oid))
+                               elog(ERROR, "combinefn not set for aggregate function");
+               }
+               else
+                       transfn_oid = aggform->aggtransfn;
+               /* Final function only required if we're finalizing the aggregates */
+               if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
+                       peragg->finalfn_oid = finalfn_oid = InvalidOid;
+               else
+                       peragg->finalfn_oid = finalfn_oid = aggform->aggfinalfn;
+               serialfn_oid = InvalidOid;
+               deserialfn_oid = InvalidOid;
+               /*
+                * Check if serialization/deserialization is required.  We only do it
+                * for aggregates that have transtype INTERNAL.
+                */
+               if (aggtranstype == INTERNALOID)
+               {
+                       /*
+                        * The planner should only have generated a serialize agg node if
+                        * every aggregate with an INTERNAL state has a serialization
+                        * function.  Verify that.
+                        */
+                       if (DO_AGGSPLIT_SERIALIZE(aggstate->aggsplit))
+                       {
+                               /* serialization only valid when not running finalfn */
+                               Assert(DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+                               if (!OidIsValid(aggform->aggserialfn))
+                                       elog(ERROR, "serialfunc not provided for serialization aggregation");
+                               serialfn_oid = aggform->aggserialfn;
+                       }
+                       /* Likewise for deserialization functions */
+                       if (DO_AGGSPLIT_DESERIALIZE(aggstate->aggsplit))
+                       {
+                               /* deserialization only valid when combining states */
+                               Assert(DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
+                               if (!OidIsValid(aggform->aggdeserialfn))
+                                       elog(ERROR, "deserialfunc not provided for deserialization aggregation");
+                               deserialfn_oid = aggform->aggdeserialfn;
+                       }
+               }
                /* Check that aggregate owner has permission to call component fns */
                {
                        HeapTuple       procTuple;
                                                                   get_func_name(finalfn_oid));
                                InvokeFunctionExecuteHook(finalfn_oid);
                        }
 +
 +#ifdef PGXC
 +                      if (OidIsValid(collectfn_oid))
 +                      {
 +                              aclresult = pg_proc_aclcheck(collectfn_oid, aggOwner,
 +                                                                                              ACL_EXECUTE);
 +                              if (aclresult != ACLCHECK_OK)
 +                                      aclcheck_error(aclresult, ACL_KIND_PROC,
 +                                                                 get_func_name(collectfn_oid));
 +                      }
 +#endif /* PGXC */
+                       if (OidIsValid(serialfn_oid))
+                       {
+                               aclresult = pg_proc_aclcheck(serialfn_oid, aggOwner,
+                                                                                        ACL_EXECUTE);
+                               if (aclresult != ACLCHECK_OK)
+                                       aclcheck_error(aclresult, ACL_KIND_PROC,
+                                                                  get_func_name(serialfn_oid));
+                               InvokeFunctionExecuteHook(serialfn_oid);
+                       }
+                       if (OidIsValid(deserialfn_oid))
+                       {
+                               aclresult = pg_proc_aclcheck(deserialfn_oid, aggOwner,
+                                                                                        ACL_EXECUTE);
+                               if (aclresult != ACLCHECK_OK)
+                                       aclcheck_error(aclresult, ACL_KIND_PROC,
+                                                                  get_func_name(deserialfn_oid));
+                               InvokeFunctionExecuteHook(deserialfn_oid);
+                       }
                }
  
                /*
                /* Count the "direct" arguments, if any */
                numDirectArgs = list_length(aggref->aggdirectargs);
  
-               /* Count the number of aggregated input columns */
-               numInputs = list_length(aggref->args);
-               peraggstate->numInputs = numInputs;
-               /* Detect how many arguments to pass to the transfn */
-               if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
-                       peraggstate->numTransInputs = numInputs;
-               else
-                       peraggstate->numTransInputs = numArguments;
                /* Detect how many arguments to pass to the finalfn */
                if (aggform->aggfinalextra)
-                       peraggstate->numFinalArgs = numArguments + 1;
+                       peragg->numFinalArgs = numArguments + 1;
                else
 -                      peragg->numFinalArgs = numDirectArgs + 1;
 +                      peraggstate->numFinalArgs = numDirectArgs + 1;
 +
 +              /* resolve actual type of transition state, if polymorphic */
 +#ifdef XCP
 +              /*
 +               * For PHASE2 we substitute the collection function, so the collection
 +               * type must be used as the transition type.
 +               */
 +              if (node->aggdistribution == AGG_MASTER)
 +                      aggtranstype = aggform->aggcollecttype;
 +              else
 +#endif /* XCP */
 +              aggtranstype = resolve_aggregate_transtype(aggref->aggfnoid,
 +                                                                                                 aggform->aggtranstype,
 +                                                                                                 inputTypes,
 +                                                                                                 numArguments);
 +#ifdef XCP
 +              /* get type of collection state, if defined */
 +              if (OidIsValid(collectfn_oid))
 +                      aggcollecttype = aggform->aggcollecttype;
 +              else
 +                      aggcollecttype = InvalidOid;
 +#endif
 +              /* build expression trees using actual argument & result types */
 +              build_aggregate_fnexprs(inputTypes,
 +                                                              numArguments,
 +                                                              numDirectArgs,
 +                                                              peraggstate->numFinalArgs,
 +                                                              aggref->aggvariadic,
 +                                                              aggtranstype,
 +#ifdef XCP
 +                                                              aggcollecttype,
 +#endif
 +                                                              aggref->aggtype,
 +                                                              aggref->inputcollid,
 +                                                              transfn_oid,
 +#ifdef XCP
 +                                                              collectfn_oid,
 +#endif
 +                                                              InvalidOid,             /* invtrans is not needed here */
 +                                                              finalfn_oid,
 +                                                              &transfnexpr,
 +                                                              NULL,
 +#ifdef XCP
 +                                                              &collectfnexpr,
 +#endif
 +                                                              &finalfnexpr);
 +
 +              /* set up infrastructure for calling the transfn and finalfn */
 +              fmgr_info(transfn_oid, &peraggstate->transfn);
 +              fmgr_info_set_expr((Node *) transfnexpr, &peraggstate->transfn);
  
+               /*
+                * build expression trees using actual argument & result types for the
+                * finalfn, if it exists and is required.
+                */
                if (OidIsValid(finalfn_oid))
                {
-                       fmgr_info(finalfn_oid, &peraggstate->finalfn);
-                       fmgr_info_set_expr((Node *) finalfnexpr, &peraggstate->finalfn);
+                       build_aggregate_finalfn_expr(inputTypes,
+                                                                                peragg->numFinalArgs,
+                                                                                aggtranstype,
+                                                                                aggref->aggtype,
+                                                                                aggref->inputcollid,
+                                                                                finalfn_oid,
+                                                                                &finalfnexpr);
+                       fmgr_info(finalfn_oid, &peragg->finalfn);
+                       fmgr_info_set_expr((Node *) finalfnexpr, &peragg->finalfn);
                }
  
 +#ifdef PGXC
 +              if (OidIsValid(collectfn_oid))
 +              {
 +                      fmgr_info(collectfn_oid, &peraggstate->collectfn);
 +                      peraggstate->collectfn.fn_expr = (Node *)collectfnexpr;
 +              }
 +#endif /* PGXC */
 +              peraggstate->aggCollation = aggref->inputcollid;
 +
 +              InitFunctionCallInfoData(peraggstate->transfn_fcinfo,
 +                                                               &peraggstate->transfn,
 +                                                               peraggstate->numTransInputs + 1,
 +                                                               peraggstate->aggCollation,
 +                                                               (void *) aggstate, NULL);
 +
 +              /* get info about relevant datatypes */
 +              get_typlenbyval(aggref->aggtype,
 +                                              &peraggstate->resulttypeLen,
 +                                              &peraggstate->resulttypeByVal);
 +              get_typlenbyval(aggtranstype,
 +                                              &peraggstate->transtypeLen,
 +                                              &peraggstate->transtypeByVal);
 +#ifdef XCP
 +              if (OidIsValid(aggcollecttype))
 +                      get_typlenbyval(aggcollecttype,
 +                                                      &peraggstate->collecttypeLen,
 +                                                      &peraggstate->collecttypeByVal);
 +#endif /* XCP */
+               /* get info about the output value's datatype */
+               get_typlenbyval(aggref->aggtype,
+                                               &peragg->resulttypeLen,
+                                               &peragg->resulttypeByVal);
  
                /*
                 * initval is potentially null, so don't try to access it as a struct
                 * field. Must do it the hard way with SysCacheGetAttr.
                 */
 +#ifdef XCP
 +              /*
 +               * If this is Phase2 get collect initial value instead
 +               */
 +              if (node->aggdistribution == AGG_MASTER)
 +                      textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
 +                                                                                Anum_pg_aggregate_agginitcollect,
 +                                                                                &peraggstate->initValueIsNull);
 +              else
 +#endif /* XCP */
                textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
                                                                          Anum_pg_aggregate_agginitval,
-                                                                         &peraggstate->initValueIsNull);
+                                                                         &initValueIsNull);
+               if (initValueIsNull)
+                       initValue = (Datum) 0;
+               else
+                       initValue = GetAggInitVal(textInitVal, aggtranstype);
  
-               if (peraggstate->initValueIsNull)
-                       peraggstate->initValue = (Datum) 0;
+               /*
+                * 2. Build working state for invoking the transition function, or
+                * look up previously initialized working state, if we can share it.
+                *
+                * find_compatible_peragg() already collected a list of per-Trans's
+                * with the same inputs. Check if any of them have the same transition
+                * function and initial value.
+                */
+               existing_transno = find_compatible_pertrans(aggstate, aggref,
+                                                                                                       transfn_oid, aggtranstype,
+                                                                                               serialfn_oid, deserialfn_oid,
+                                                                                                 initValue, initValueIsNull,
+                                                                                                       same_input_transnos);
+               if (existing_transno != -1)
+               {
+                       /*
+                        * Existing compatible trans found, so just point the 'peragg' to
+                        * the same per-trans struct.
+                        */
+                       pertrans = &pertransstates[existing_transno];
+                       peragg->transno = existing_transno;
+               }
                else
-                       peraggstate->initValue = GetAggInitVal(textInitVal,
-                                                                                                  aggtranstype);
+               {
+                       pertrans = &pertransstates[++transno];
+                       build_pertrans_for_aggref(pertrans, aggstate, estate,
+                                                                         aggref, transfn_oid, aggtranstype,
+                                                                         serialfn_oid, deserialfn_oid,
+                                                                         initValue, initValueIsNull,
+                                                                         inputTypes, numArguments);
+                       peragg->transno = transno;
+               }
+               ReleaseSysCache(aggTuple);
+       }
+       /*
+        * Update numaggs to match the number of unique aggregates found. Also set
+        * numstates to the number of unique aggregate states found.
+        */
+       aggstate->numaggs = aggno + 1;
+       aggstate->numtrans = transno + 1;
+       return aggstate;
+ }
+ /*
+  * Build the state needed to calculate a state value for an aggregate.
+  *
+  * This initializes all the fields in 'pertrans'. 'aggref' is the aggregate
+  * to initialize the state for. 'aggtransfn', 'aggtranstype', and the rest
+  * of the arguments could be calculated from 'aggref', but the caller has
+  * calculated them already, so might as well pass them.
+  */
+ static void
+ build_pertrans_for_aggref(AggStatePerTrans pertrans,
+                                                 AggState *aggstate, EState *estate,
+                                                 Aggref *aggref,
+                                                 Oid aggtransfn, Oid aggtranstype,
+                                                 Oid aggserialfn, Oid aggdeserialfn,
+                                                 Datum initValue, bool initValueIsNull,
+                                                 Oid *inputTypes, int numArguments)
+ {
+       int                     numGroupingSets = Max(aggstate->maxsets, 1);
+       Expr       *serialfnexpr = NULL;
+       Expr       *deserialfnexpr = NULL;
+       ListCell   *lc;
+       int                     numInputs;
+       int                     numDirectArgs;
+       List       *sortlist;
+       int                     numSortCols;
+       int                     numDistinctCols;
+       int                     naggs;
+       int                     i;
+       /* Begin filling in the pertrans data */
+       pertrans->aggref = aggref;
+       pertrans->aggCollation = aggref->inputcollid;
+       pertrans->transfn_oid = aggtransfn;
+       pertrans->serialfn_oid = aggserialfn;
+       pertrans->deserialfn_oid = aggdeserialfn;
+       pertrans->initValue = initValue;
+       pertrans->initValueIsNull = initValueIsNull;
+       /* Count the "direct" arguments, if any */
+       numDirectArgs = list_length(aggref->aggdirectargs);
+       /* Count the number of aggregated input columns */
+       pertrans->numInputs = numInputs = list_length(aggref->args);
+       pertrans->aggtranstype = aggtranstype;
+       /* Detect how many arguments to pass to the transfn */
+       if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
+               pertrans->numTransInputs = numInputs;
+       else
+               pertrans->numTransInputs = numArguments;
+       /*
+        * When combining states, we have no use at all for the aggregate
+        * function's transfn. Instead we use the combinefn.  In this case, the
+        * transfn and transfn_oid fields of pertrans refer to the combine
+        * function rather than the transition function.
+        */
+       if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
+       {
+               Expr       *combinefnexpr;
+               build_aggregate_combinefn_expr(aggtranstype,
+                                                                          aggref->inputcollid,
+                                                                          aggtransfn,
+                                                                          &combinefnexpr);
+               fmgr_info(aggtransfn, &pertrans->transfn);
+               fmgr_info_set_expr((Node *) combinefnexpr, &pertrans->transfn);
+               InitFunctionCallInfoData(pertrans->transfn_fcinfo,
+                                                                &pertrans->transfn,
+                                                                2,
+                                                                pertrans->aggCollation,
+                                                                (void *) aggstate, NULL);
+               /*
+                * Ensure that a combine function to combine INTERNAL states is not
+                * strict. This should have been checked during CREATE AGGREGATE, but
+                * the strict property could have been changed since then.
+                */
+               if (pertrans->transfn.fn_strict && aggtranstype == INTERNALOID)
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+                                        errmsg("combine function for aggregate %u must not be declared STRICT",
+                                                       aggref->aggfnoid)));
+       }
+       else
+       {
+               Expr       *transfnexpr;
+               /*
+                * Set up infrastructure for calling the transfn.  Note that invtrans
+                * is not needed here.
+                */
+               build_aggregate_transfn_expr(inputTypes,
+                                                                        numArguments,
+                                                                        numDirectArgs,
+                                                                        aggref->aggvariadic,
+                                                                        aggtranstype,
+                                                                        aggref->inputcollid,
+                                                                        aggtransfn,
+                                                                        InvalidOid,
+                                                                        &transfnexpr,
+                                                                        NULL);
+               fmgr_info(aggtransfn, &pertrans->transfn);
+               fmgr_info_set_expr((Node *) transfnexpr, &pertrans->transfn);
+               InitFunctionCallInfoData(pertrans->transfn_fcinfo,
+                                                                &pertrans->transfn,
+                                                                pertrans->numTransInputs + 1,
+                                                                pertrans->aggCollation,
+                                                                (void *) aggstate, NULL);
  
 +#ifdef PGXC
 +              /*
 +               * initval for collection function is potentially null, so don't try to
 +               * access it as a struct field. Must do it the hard way with
 +               * SysCacheGetAttr.
 +               */
 +              if (OidIsValid(aggcollecttype))
 +              {
 +                      textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
 +                                                                                Anum_pg_aggregate_agginitcollect,
 +                                                                                &peraggstate->initCollectValueIsNull);
 +                      if (peraggstate->initCollectValueIsNull)
 +                              peraggstate->initCollectValue = (Datum) 0;
 +                      else
 +                              peraggstate->initCollectValue = GetAggInitVal(textInitVal,
 +                                                                                                                        aggcollecttype);
 +                      /*
 +                       * If the collectfn is strict and the initval is NULL, make sure
 +                       * transtype and collecttype are the same (or at least
 +                       * binary-compatible), so that it's OK to use the transition value
 +                       * as the initial collectValue. This should have been checked at agg
 +                       * definition time, but just in case...
 +                       */
 +                      if (peraggstate->collectfn.fn_strict && peraggstate->initValueIsNull)
 +                      {
 +                              if (!IsBinaryCoercible(aggtranstype, aggcollecttype))
 +                                      ereport(ERROR,
 +                                                      (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
 +                                                       errmsg("aggregate %u needs to have compatible transition type and collection type",
 +                                                                      aggref->aggfnoid)));
 +                      }
 +              }
 +#endif /* PGXC */
 +
                /*
                 * If the transfn is strict and the initval is NULL, make sure input
                 * type and transtype are the same (or at least binary-compatible), so
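
Editor's note: the hunk above splits aggregate setup into per-aggregate and per-transition state, sharing one transition state between aggregates whose transition function, state type, (de)serialization functions and initial value all match. Below is a minimal sketch of that compatibility rule using hypothetical type and field names; it is not the executor's actual find_compatible_pertrans().

#include "postgres.h"
#include "utils/datum.h"

/* Hypothetical summary of one candidate transition state. */
typedef struct TransCandidate
{
	Oid			transfn_oid;		/* transition function */
	Oid			aggtranstype;		/* transition state type */
	Oid			serialfn_oid;		/* serialization function, if any */
	Oid			deserialfn_oid;		/* deserialization function, if any */
	Datum		initValue;			/* initial state value */
	bool		initValueIsNull;
	bool		transtypeByVal;		/* pass-by-value state type? */
	int16		transtypeLen;		/* state type length */
} TransCandidate;

/*
 * Two aggregates may share one transition state only if everything that
 * feeds the state transition is identical.
 */
static bool
trans_candidates_compatible(const TransCandidate *a, const TransCandidate *b)
{
	if (a->transfn_oid != b->transfn_oid ||
		a->aggtranstype != b->aggtranstype ||
		a->serialfn_oid != b->serialfn_oid ||
		a->deserialfn_oid != b->deserialfn_oid)
		return false;

	/* Initial values must both be NULL, or be equal datums of the state type. */
	if (a->initValueIsNull || b->initValueIsNull)
		return a->initValueIsNull && b->initValueIsNull;

	return datumIsEqual(a->initValue, b->initValue,
						a->transtypeByVal, a->transtypeLen);
}

Under such a rule, aggregates like avg() and sum() over the same numeric column, which use the same transition function, can be evaluated over a single shared transition state.
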
index d5e933e1969638bc9686c689fd84b5548019feb8,d886aaf64d6776252020622ddd454633791f1f25..583e39021bcb39b63f500887befefe9b1fc9b32b
  #include "executor/executor.h"
  #include "executor/nodeForeignscan.h"
  #include "foreign/fdwapi.h"
+ #include "utils/memutils.h"
  #include "utils/rel.h"
  
 +#ifdef PGXC
 +#include "utils/lsyscache.h"
 +#include "pgxc/pgxc.h"
 +#endif
 +
  static TupleTableSlot *ForeignNext(ForeignScanState *node);
  static bool ForeignRecheck(ForeignScanState *node, TupleTableSlot *slot);
  
index 18b8589deaf7a084f79b02ae83c82867a7d2b533,af7b26c0ef01ae5db18394db8740cef7e06afe1f..439e36ee3adda81e271f66bbba79c6d7468f6d19
@@@ -3,8 -3,7 +3,8 @@@
   * nodeModifyTable.c
   *      routines to handle ModifyTable nodes.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -1527,9 -1591,12 +1592,13 @@@ ExecInitModifyTable(ModifyTable *node, 
        i = 0;
        foreach(l, node->plans)
        {
 +
                subplan = (Plan *) lfirst(l);
  
+               /* Initialize the usesFdwDirectModify flag */
+               resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
+                                                                                                node->fdwDirectModifyPlans);
                /*
                 * Verify result relation is a valid target for the current operation
                 */
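
Editor's note: the new ri_usesFdwDirectModify flag above is driven by a Bitmapset of subplan indexes (node->fdwDirectModifyPlans). A minimal, self-contained sketch of that Bitmapset pattern follows; the indexes and the DEBUG1 output are purely illustrative.

#include "postgres.h"
#include "nodes/bitmapset.h"

/* Record which subplan indexes use FDW direct modification, then test them. */
static void
bitmapset_flag_example(void)
{
	Bitmapset  *direct_plans = NULL;
	int			i;

	/* suppose subplans 0 and 2 were converted to direct modifications */
	direct_plans = bms_add_member(direct_plans, 0);
	direct_plans = bms_add_member(direct_plans, 2);

	for (i = 0; i < 3; i++)
		elog(DEBUG1, "subplan %d uses direct modify: %s",
			 i, bms_is_member(i, direct_plans) ? "yes" : "no");

	bms_free(direct_plans);
}
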
Simple merge
Simple merge
index 8d62ea6f40cca464c8555937beefbb55c8ff47ff,d4c88a1f0efdcda2fda12d06f1eee2bd87433706..064ab4ac0215c8a3c00946ceabcfca21c74d9dd0
@@@ -23,8 -23,7 +23,8 @@@
   * aggregate function over all rows in the current row's window frame.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
Simple merge
Simple merge
index dd96a8b641e55c02bf572f5f1eff6f3f46f3f44a,cdd07d577b08e381d92225bbdfe9c033fb302b29..610082839940278596f006fdf5d0511571e21d2a
@@@ -151,34 -144,34 +144,56 @@@ retry
  
                Assert(waitfor);
  
-               w = WaitLatchOrSocket(MyLatch,
-                                                         WL_LATCH_SET | WL_POSTMASTER_DEATH | waitfor,
-                                                         port->sock, 0);
 -              ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
++              ModifyWaitEvent(FeBeWaitSet, 0, waitfor | WL_POSTMASTER_DEATH, NULL);
+               WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
+               /*
+                * If the postmaster has died, it's not safe to continue running,
+                * because it is the postmaster's job to kill us if some other backend
+                * exits uncleanly.  Moreover, we won't run very well in this state;
+                * helper processes like walwriter and the bgwriter will exit, so
+                * performance may be poor.  Finally, if we don't exit, pg_ctl will be
+                * unable to restart the postmaster without manual intervention, so no
+                * new connections can be accepted.  Exiting clears the deck for a
+                * postmaster restart.
+                *
+                * (Note that we only make this check when we would otherwise sleep on
+                * our latch.  We might still continue running for a while if the
+                * postmaster is killed in mid-query, or even through multiple queries
+                * if we never have to wait for read.  We don't want to burn too many
+                * cycles checking for this very rare condition, and this should cause
+                * us to exit quickly in most cases.)
+                */
+               if (event.events & WL_POSTMASTER_DEATH)
+                       ereport(FATAL,
+                                       (errcode(ERRCODE_ADMIN_SHUTDOWN),
+                                        errmsg("terminating connection due to unexpected postmaster exit")));
  
                /* Handle interrupt. */
-               if (w & WL_LATCH_SET)
+               if (event.events & WL_LATCH_SET)
                {
                        ResetLatch(MyLatch);
                        ProcessClientReadInterrupt(true);
@@@ -252,18 -245,18 +267,24 @@@ retry
  
                Assert(waitfor);
  
-               w = WaitLatchOrSocket(MyLatch,
-                                                         WL_LATCH_SET | WL_POSTMASTER_DEATH | waitfor,
-                                                         port->sock, 0);
 -              ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
++              ModifyWaitEvent(FeBeWaitSet, 0, waitfor | WL_POSTMASTER_DEATH, NULL);
+               WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
+               /* See comments in secure_read. */
+               if (event.events & WL_POSTMASTER_DEATH)
+                       ereport(FATAL,
+                                       (errcode(ERRCODE_ADMIN_SHUTDOWN),
+                                        errmsg("terminating connection due to unexpected postmaster exit")));
  
                /* Handle interrupt. */
-               if (w & WL_LATCH_SET)
+               if (event.events & WL_LATCH_SET)
                {
                        ResetLatch(MyLatch);
                        ProcessClientWriteInterrupt(true);
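
Editor's note: both hunks above switch from WaitLatchOrSocket() to a reusable WaitEventSet and add an explicit postmaster-death check. Below is a minimal sketch of that pattern against the 9.6-era latch API; the function name and socket argument are hypothetical, and the real callers keep a long-lived set such as FeBeWaitSet instead of building one per call.

#include "postgres.h"
#include "miscadmin.h"
#include "storage/latch.h"

/* Wait for the socket to become readable, the latch to be set, or death. */
static void
wait_readable_or_die(pgsocket sock)
{
	WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 3);
	WaitEvent	event;

	AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
	AddWaitEventToSet(set, WL_POSTMASTER_DEATH, PGINVALID_SOCKET, NULL, NULL);
	AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);

	WaitEventSetWait(set, -1 /* no timeout */ , &event, 1);

	if (event.events & WL_POSTMASTER_DEATH)
		ereport(FATAL,
				(errcode(ERRCODE_ADMIN_SHUTDOWN),
				 errmsg("terminating connection due to unexpected postmaster exit")));

	if (event.events & WL_LATCH_SET)
		ResetLatch(MyLatch);

	/* otherwise WL_SOCKET_READABLE: the caller can retry its read */
	FreeWaitEventSet(set);
}
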
Simple merge
Simple merge
index 218c1d1d53ee673222c57d23b8f1ac3798adceb8,3244c76ddcca13b79db7902da12a22f4f3ccf283..cb657a8a4b369ca0aafb80371fc48260790d93c8
   * be handled easily in a simple depth-first traversal.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * IDENTIFICATION
   *      src/backend/nodes/copyfuncs.c
  #include "postgres.h"
  
  #include "miscadmin.h"
+ #include "nodes/extensible.h"
  #include "nodes/plannodes.h"
  #include "nodes/relation.h"
 +#ifdef PGXC
 +#include "pgxc/locator.h"
 +#include "pgxc/planner.h"
 +#endif
 +#ifdef XCP
 +#include "pgxc/execRemote.h"
 +#endif
  #include "utils/datum.h"
+ #include "utils/rel.h"
  
  
  /*
@@@ -102,17 -97,6 +106,16 @@@ _copyPlannedStmt(const PlannedStmt *fro
        COPY_NODE_FIELD(relationOids);
        COPY_NODE_FIELD(invalItems);
        COPY_SCALAR_FIELD(nParamExec);
-       COPY_SCALAR_FIELD(hasRowSecurity);
 +#ifdef XCP
 +      COPY_SCALAR_FIELD(nParamRemote);
 +      COPY_POINTER_FIELD(remoteparams,
 +                                         newnode->nParamRemote * sizeof(RemoteParam));
 +      COPY_STRING_FIELD(pname);
 +      COPY_SCALAR_FIELD(distributionType);
 +      COPY_SCALAR_FIELD(distributionKey);
 +      COPY_NODE_FIELD(distributionNodes);
 +      COPY_NODE_FIELD(distributionRestrict);
 +#endif
  
        return newnode;
  }
@@@ -856,9 -869,7 +888,10 @@@ _copyAgg(const Agg *from
        CopyPlanFields((const Plan *) from, (Plan *) newnode);
  
        COPY_SCALAR_FIELD(aggstrategy);
 +#ifdef XCP
 +      COPY_SCALAR_FIELD(aggdistribution);
 +#endif
+       COPY_SCALAR_FIELD(aggsplit);
        COPY_SCALAR_FIELD(numCols);
        if (from->numCols > 0)
        {
@@@ -4381,88 -4250,19 +4463,103 @@@ _copyValue(const Value *from
        return newnode;
  }
  
 +#ifdef PGXC
 +/* ****************************************************************
 + *                                    barrier.h copy functions
 + * ****************************************************************
 + */
 +static BarrierStmt *
 +_copyBarrierStmt(const BarrierStmt *from)
 +{
 +      BarrierStmt *newnode = makeNode(BarrierStmt);
 +
 +      COPY_STRING_FIELD(id);
 +
 +      return newnode;
 +}
 +
 +static PauseClusterStmt *
 +_copyPauseClusterStmt(const PauseClusterStmt *from)
 +{
 +      PauseClusterStmt *newnode = makeNode(PauseClusterStmt);
 +
 +      COPY_SCALAR_FIELD(pause);
 +
 +      return newnode;
 +}
 +
 +/* ****************************************************************
 + *                                    nodemgr.h copy functions
 + * ****************************************************************
 + */
 +static AlterNodeStmt *
 +_copyAlterNodeStmt(const AlterNodeStmt *from)
 +{
 +      AlterNodeStmt *newnode = makeNode(AlterNodeStmt);
 +
 +      COPY_STRING_FIELD(node_name);
 +      COPY_NODE_FIELD(options);
 +
 +      return newnode;
 +}
 +
 +static CreateNodeStmt *
 +_copyCreateNodeStmt(const CreateNodeStmt *from)
 +{
 +      CreateNodeStmt *newnode = makeNode(CreateNodeStmt);
 +
 +      COPY_STRING_FIELD(node_name);
 +      COPY_NODE_FIELD(options);
 +
 +      return newnode;
 +}
 +
 +static DropNodeStmt *
 +_copyDropNodeStmt(const DropNodeStmt *from)
 +{
 +      DropNodeStmt *newnode = makeNode(DropNodeStmt);
 +
 +      COPY_STRING_FIELD(node_name);
 +
 +      return newnode;
 +}
 +
 +/* ****************************************************************
 + *                                    groupmgr.h copy functions
 + * ****************************************************************
 + */
 +static CreateGroupStmt *
 +_copyCreateGroupStmt(const CreateGroupStmt *from)
 +{
 +      CreateGroupStmt *newnode = makeNode(CreateGroupStmt);
 +
 +      COPY_STRING_FIELD(group_name);
 +      COPY_NODE_FIELD(nodes);
 +
 +      return newnode;
 +}
 +
 +static DropGroupStmt *
 +_copyDropGroupStmt(const DropGroupStmt *from)
 +{
 +      DropGroupStmt *newnode = makeNode(DropGroupStmt);
 +
 +      COPY_STRING_FIELD(group_name);
++      return newnode;
++}
+ static ForeignKeyCacheInfo *
+ _copyForeignKeyCacheInfo(const ForeignKeyCacheInfo *from)
+ {
+       ForeignKeyCacheInfo *newnode = makeNode(ForeignKeyCacheInfo);
+       COPY_SCALAR_FIELD(conrelid);
+       COPY_SCALAR_FIELD(confrelid);
+       COPY_SCALAR_FIELD(nkeys);
+       /* COPY_SCALAR_FIELD might work for these, but let's not assume that */
+       memcpy(newnode->conkey, from->conkey, sizeof(newnode->conkey));
+       memcpy(newnode->confkey, from->confkey, sizeof(newnode->confkey));
+       memcpy(newnode->conpfeqop, from->conpfeqop, sizeof(newnode->conpfeqop));
  
        return newnode;
  }
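
Editor's note: _copyForeignKeyCacheInfo() above copies its fixed-size arrays with memcpy rather than COPY_SCALAR_FIELD or COPY_POINTER_FIELD. The sketch below uses a hypothetical node (ExampleNode and all of its fields are invented) to show the difference between an embedded fixed-size array, copied wholesale with sizeof(), and a separately palloc'd array whose copy is sized from a count field, which is what COPY_POINTER_FIELD handles.

#include "postgres.h"
#include "access/attnum.h"
#include "nodes/nodes.h"

#define EXAMPLE_MAX_KEYS 32		/* invented limit, analogous to INDEX_MAX_KEYS */

/* Invented node layout: one embedded fixed-size array, one palloc'd array. */
typedef struct ExampleNode
{
	NodeTag		type;
	int			nkeys;
	AttrNumber	conkey[EXAMPLE_MAX_KEYS];	/* embedded: copy with sizeof() */
	Oid		   *operids;					/* separately allocated: nkeys entries */
} ExampleNode;

static ExampleNode *
copy_example_node(const ExampleNode *from)
{
	ExampleNode *newnode = (ExampleNode *) palloc0(sizeof(ExampleNode));

	newnode->type = from->type;
	newnode->nkeys = from->nkeys;

	/* the whole embedded array is copied, regardless of nkeys */
	memcpy(newnode->conkey, from->conkey, sizeof(newnode->conkey));

	/* the pointer field needs its own allocation, sized from the count */
	newnode->operids = (Oid *) palloc(from->nkeys * sizeof(Oid));
	memcpy(newnode->operids, from->operids, from->nkeys * sizeof(Oid));

	return newnode;
}
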
index dfb273ba291e521626b9a1edc4696b157633145d,1eb679926af9f0f1ab412f11278711756e6ccae0..e6f44f1cf833ec93074ccf11ec30392d11e31899
   * "x" to be considered equal() to another reference to "x" in the query.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * IDENTIFICATION
   *      src/backend/nodes/equalfuncs.c
@@@ -215,8 -191,11 +216,10 @@@ static boo
  _equalAggref(const Aggref *a, const Aggref *b)
  {
        COMPARE_SCALAR_FIELD(aggfnoid);
 -      COMPARE_SCALAR_FIELD(aggtype);
        COMPARE_SCALAR_FIELD(aggcollid);
        COMPARE_SCALAR_FIELD(inputcollid);
+       /* ignore aggtranstype since it might not be set yet */
+       COMPARE_NODE_FIELD(aggargtypes);
        COMPARE_NODE_FIELD(aggdirectargs);
        COMPARE_NODE_FIELD(args);
        COMPARE_NODE_FIELD(aggorder);
Simple merge
Simple merge
index 37031197f8ebe8ab0f4871ad6a5ba4874970897b,acaf4ea5ebcd694b266001ddc8526891b56bcddb..b101ffb570feb98730c8a338cf5b7ebd59a078b3
@@@ -3,8 -3,7 +3,8 @@@
   * outfuncs.c
   *      Output functions for Postgres tree nodes.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  #include <ctype.h>
  
  #include "lib/stringinfo.h"
+ #include "nodes/extensible.h"
  #include "nodes/plannodes.h"
  #include "nodes/relation.h"
 +#ifdef XCP
 +#include "fmgr.h"
 +#include "miscadmin.h"
 +#include "catalog/namespace.h"
 +#include "pgxc/execRemote.h"
 +#include "utils/lsyscache.h"
 +#endif
  #include "utils/datum.h"
 +#ifdef PGXC
 +#include "pgxc/planner.h"
 +#endif
 +
 +#ifdef XCP
 +/*
 + * When we send query plans between nodes we need to send OIDs of various
 + * objects - relations, data types, functions, etc.
 + * OIDs of these objects may differ between nodes, so we send an identifier,
 + * chosen per object type, that allows the OID to be looked up on the target
 + * node.  On the other hand we want to save space when storing rules, or in
 + * other cases where we encode and decode nodes on the same node.
 + * For now the default format is not portable, as in the original Postgres
 + * code.  Later we may want to add an extra parameter to nodeToString().
 + */
 +static bool portable_output = false;
 +void
 +set_portable_output(bool value)
 +{
 +      portable_output = value;
 +}
 +#endif
+ #include "utils/rel.h"
  
  
  /*
  
  /* Write a Node field */
  #define WRITE_NODE_FIELD(fldname) \
 -      (appendStringInfo(str, " :" CppAsString(fldname) " "), \
 -       outNode(str, node->fldname))
 +      do { \
 +              appendStringInfo(str, " :" CppAsString(fldname) " "); \
-               _outNode(str, node->fldname); \
++              outNode(str, node->fldname); \
 +      } while (0)
  
  /* Write a bitmapset field */
  #define WRITE_BITMAPSET_FIELD(fldname) \
@@@ -536,19 -358,10 +556,19 @@@ _outModifyTable(StringInfo str, const M
        WRITE_NODE_FIELD(rowMarks);
        WRITE_INT_FIELD(epqParam);
        WRITE_ENUM_FIELD(onConflictAction, OnConflictAction);
 +#ifdef XCP    
 +      if (portable_output)
 +              WRITE_RELID_LIST_FIELD(arbiterIndexes);
 +      else
 +      {
 +#endif
        WRITE_NODE_FIELD(arbiterIndexes);
 +#ifdef XCP
 +      }
 +#endif
        WRITE_NODE_FIELD(onConflictSet);
        WRITE_NODE_FIELD(onConflictWhere);
-       WRITE_INT_FIELD(exclRelRTI);
+       WRITE_UINT_FIELD(exclRelRTI);
        WRITE_NODE_FIELD(exclRelTlist);
  }
  
@@@ -1020,9 -704,7 +1054,10 @@@ _outAgg(StringInfo str, const Agg *node
        _outPlanInfo(str, (const Plan *) node);
  
        WRITE_ENUM_FIELD(aggstrategy, AggStrategy);
 +#ifdef XCP
 +      WRITE_ENUM_FIELD(aggdistribution, AggDistribution);
 +#endif
+       WRITE_ENUM_FIELD(aggsplit, AggSplit);
        WRITE_INT_FIELD(numCols);
  
        appendStringInfoString(str, " :grpColIdx");
@@@ -1684,12 -1005,7 +1718,12 @@@ _outConst(StringInfo str, const Const *
        if (node->constisnull)
                appendStringInfoString(str, "<>");
        else
-               _outDatum(str, node->constvalue, node->constlen, node->constbyval);
 +#ifdef XCP
 +              if (portable_output)
 +                      _printDatum(str, node->constvalue, node->consttype);
 +              else
 +#endif
+               outDatum(str, node->constvalue, node->constlen, node->constbyval);
  }
  
  static void
@@@ -1720,30 -1026,12 +1754,32 @@@ _outAggref(StringInfo str, const Aggre
  {
        WRITE_NODE_TYPE("AGGREF");
  
 +#ifdef XCP
 +      if (portable_output)
 +              WRITE_FUNCID_FIELD(aggfnoid);
 +      else
 +#endif
        WRITE_OID_FIELD(aggfnoid);
 +#ifdef XCP
 +      if (portable_output)
 +              WRITE_TYPID_FIELD(aggtype);
 +      else
 +#endif
        WRITE_OID_FIELD(aggtype);
 +#ifdef XCP
 +      if (portable_output)
 +              WRITE_COLLID_FIELD(aggcollid);
 +      else
 +#endif
        WRITE_OID_FIELD(aggcollid);
 +#ifdef XCP
 +      if (portable_output)
 +              WRITE_COLLID_FIELD(inputcollid);
 +      else
 +#endif
        WRITE_OID_FIELD(inputcollid);
+       WRITE_OID_FIELD(aggtranstype);
+       WRITE_NODE_FIELD(aggargtypes);
        WRITE_NODE_FIELD(aggdirectargs);
        WRITE_NODE_FIELD(args);
        WRITE_NODE_FIELD(aggorder);
@@@ -4590,11 -3845,9 +4972,17 @@@ outNode(StringInfo str, const void *obj
                        case T_XmlSerialize:
                                _outXmlSerialize(str, obj);
                                break;
 +#ifdef PGXC
 +                      case T_ExecNodes:
 +                              _outExecNodes(str, obj);
 +                              break;
 +#endif
+                       case T_ForeignKeyCacheInfo:
+                               _outForeignKeyCacheInfo(str, obj);
+                               break;
  
                        default:
  
Simple merge
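
Editor's note: the portable_output flag added to outfuncs.c above (mirrored by portable_input in readfuncs.c below) exists because OIDs are node-local: a plan shipped to another node must carry catalog names that are re-resolved on arrival. The helper below is a hypothetical sketch of the write side for a relation; it is not the real WRITE_RELID_FIELD machinery, and name quoting/escaping is ignored.

#include "postgres.h"
#include "lib/stringinfo.h"
#include "utils/lsyscache.h"

/*
 * Emit a relation as "<schema> <name>" instead of a bare OID, so that the
 * receiving node can look the relation up in its own catalogs.
 */
static void
append_portable_relid(StringInfo str, Oid relid)
{
	char	   *relname = get_rel_name(relid);
	char	   *nspname = relname ?
		get_namespace_name(get_rel_namespace(relid)) : NULL;

	appendStringInfo(str, " %s %s",
					 nspname ? nspname : "<>",
					 relname ? relname : "<>");
}

The READ_RELID_* macros in readfuncs.c perform the inverse lookup with get_relname_relid().
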
index 00a59bd80f885b2cb43c9452bbd91f3c77a54a00,94954dcc722a3d865bc50a7b0a8c31cb68be2bda..ec63b9017a1859e628859d1c59c3eaa5fc4fd78b
@@@ -3,10 -3,8 +3,10 @@@
   * readfuncs.c
   *      Reader functions for Postgres tree nodes.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
  
  #include <math.h>
  
+ #include "fmgr.h"
+ #include "nodes/extensible.h"
  #include "nodes/parsenodes.h"
+ #include "nodes/plannodes.h"
  #include "nodes/readfuncs.h"
 +#ifdef PGXC
 +#include "access/htup.h"
 +#endif
 +#ifdef XCP
 +#include "fmgr.h"
 +#include "catalog/namespace.h"
 +#include "catalog/pg_class.h"
 +#include "nodes/plannodes.h"
 +#include "pgxc/execRemote.h"
 +#include "utils/builtins.h"
 +#include "utils/lsyscache.h"
  
  
 +/*
 + * When we send query plans between nodes we need to send OIDs of various
 + * objects - relations, data types, functions, etc.
 + * OIDs of these objects may differ between nodes, so we send an identifier,
 + * chosen per object type, that allows the OID to be looked up on the target
 + * node.  On the other hand we want to save space when storing rules, or in
 + * other cases where we encode and decode nodes on the same node.
 + * For now the default format is not portable, as in the original Postgres
 + * code.  Later we may want to add an extra parameter to stringToNode().
 + */
 +static bool portable_input = false;
 +void
 +set_portable_input(bool value)
 +{
 +      portable_input = value;
 +}
 +#endif /* XCP */
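
Editor's note: a hypothetical counterpart to the portable write side described above, resolving a schema-qualified name back to a local OID, roughly what the READ_RELID_* macros below do. The function name is invented and error handling is reduced to a WARNING.

#include "postgres.h"
#include "catalog/namespace.h"

/* Resolve (nspname, relname), as written by the sending node, to a local OID. */
static Oid
lookup_portable_relid(const char *nspname, const char *relname)
{
	Oid			nspoid;
	Oid			relid;

	if (relname == NULL)
		return InvalidOid;

	nspoid = nspname ? get_namespace_oid(nspname, true /* missing_ok */ ) : InvalidOid;
	relid = get_relname_relid(relname, nspoid);

	if (!OidIsValid(relid))
		elog(WARNING, "could not find OID for relation %s.%s",
			 nspname ? nspname : "<>", relname);

	return relid;
}
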
 +
  /*
   * Macros to simplify reading of different kinds of fields.  Use these
   * wherever possible to reduce the chance for silly typos.  Note that these
        token = pg_strtok(&length);             /* get field value */ \
        local_node->fldname = atoui(token)
  
 +#ifdef XCP
 +/* Read a long integer field (anything written as ":fldname %ld") */
  #define READ_LONG_FIELD(fldname) \
        token = pg_strtok(&length);             /* skip :fldname */ \
        token = pg_strtok(&length);             /* get field value */ \
        local_node->fldname = atol(token)
 +#endif
  
  /* Read an OID field (don't hard-wire assumption that OID is same as uint) */
 +#ifdef XCP
 +#define READ_OID_FIELD(fldname) \
 +      (AssertMacro(!portable_input),  /* only allow to read OIDs within a node */ \
 +       token = pg_strtok(&length),    /* skip :fldname */ \
 +       token = pg_strtok(&length),    /* get field value */ \
 +       local_node->fldname = atooid(token))
 +#else
  #define READ_OID_FIELD(fldname) \
        token = pg_strtok(&length);             /* skip :fldname */ \
        token = pg_strtok(&length);             /* get field value */ \
        (void) token;                           /* in case not used elsewhere */ \
        local_node->fldname = _readBitmapset()
  
 +#ifdef XCP
 +/* Read fields of a Plan node */
 +#define READ_PLAN_FIELDS(nodeTypeName) \
 +      Plan *plan_node; \
 +      READ_LOCALS(nodeTypeName); \
 +      plan_node = (Plan *) local_node; \
 +      token = pg_strtok(&length);             /* skip :startup_cost */ \
 +      token = pg_strtok(&length);             /* get field value */ \
 +      plan_node->startup_cost = atof(token); \
 +      token = pg_strtok(&length);             /* skip :total_cost */ \
 +      token = pg_strtok(&length);             /* get field value */ \
 +      plan_node->total_cost = atof(token); \
 +      token = pg_strtok(&length);             /* skip :plan_rows */ \
 +      token = pg_strtok(&length);             /* get field value */ \
 +      plan_node->plan_rows = atof(token); \
 +      token = pg_strtok(&length);             /* skip :plan_width */ \
 +      token = pg_strtok(&length);             /* get field value */ \
 +      plan_node->plan_width = atoi(token); \
 +      token = pg_strtok(&length);             /* skip :targetlist */ \
 +      plan_node->targetlist = nodeRead(NULL, 0); \
 +      token = pg_strtok(&length);             /* skip :qual */ \
 +      plan_node->qual = nodeRead(NULL, 0); \
 +      token = pg_strtok(&length);             /* skip :lefttree */ \
 +      plan_node->lefttree = nodeRead(NULL, 0); \
 +      token = pg_strtok(&length);             /* skip :righttree */ \
 +      plan_node->righttree = nodeRead(NULL, 0); \
 +      token = pg_strtok(&length);             /* skip :initPlan */ \
 +      plan_node->initPlan = nodeRead(NULL, 0); \
 +      token = pg_strtok(&length);             /* skip :extParam */ \
 +      plan_node->extParam = _readBitmapset(); \
 +      token = pg_strtok(&length);             /* skip :allParam */ \
 +      plan_node->allParam = _readBitmapset()
 +
 +/* Read fields of a Scan node */
 +#define READ_SCAN_FIELDS(nodeTypeName) \
 +      Scan *scan_node; \
 +      READ_PLAN_FIELDS(nodeTypeName); \
 +      scan_node = (Scan *) local_node; \
 +      token = pg_strtok(&length);             /* skip :scanrelid */ \
 +      token = pg_strtok(&length);             /* get field value */ \
 +      scan_node->scanrelid = atoi(token)
 +
 +/* Read fields of a Join node */
 +#define READ_JOIN_FIELDS(nodeTypeName) \
 +      Join *join_node; \
 +      READ_PLAN_FIELDS(nodeTypeName); \
 +      join_node = (Join *) local_node; \
 +      token = pg_strtok(&length);             /* skip :jointype */ \
 +      token = pg_strtok(&length);             /* get field value */ \
 +      join_node->jointype = (JoinType) atoi(token); \
 +      token = pg_strtok(&length);             /* skip :joinqual */ \
 +      join_node->joinqual = nodeRead(NULL, 0)
 +
 +/*
 + * Macros to read an identifier and lookup the OID
 + * The identifier depends on object type.
 + */
 +#define NSP_OID(nspname) LookupNamespaceNoError(nspname)
 +
 +/* Read relation identifier and lookup the OID */
 +#define READ_RELID_INTERNAL(relid, warn) \
 +      do { \
 +              char       *nspname; /* namespace name */ \
 +              char       *relname; /* relation name */ \
 +              token = pg_strtok(&length); /* get nspname */ \
 +              nspname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* get relname */ \
 +              relname = nullable_string(token, length); \
 +              if (relname) \
 +              { \
 +                      relid = get_relname_relid(relname, \
 +                                                                                                      NSP_OID(nspname)); \
 +                      if (!OidIsValid((relid)) && (warn)) \
 +                              elog(WARNING, "could not find OID for relation %s.%s", nspname,\
 +                                              relname); \
 +              } \
 +              else \
 +                      relid = InvalidOid; \
 +      } while (0)
 +
 +#define READ_RELID_FIELD_NOWARN(fldname) \
 +      do { \
 +              Oid relid; \
 +              token = pg_strtok(&length);             /* skip :fldname */ \
 +              READ_RELID_INTERNAL(relid, false); \
 +              local_node->fldname = relid; \
 +      } while (0)
 +
 +#define READ_RELID_FIELD(fldname) \
 +      do { \
 +              Oid relid; \
 +              token = pg_strtok(&length);             /* skip :fldname */ \
 +              READ_RELID_INTERNAL(relid, true); \
 +              local_node->fldname = relid; \
 +      } while (0)
 +
 +#define READ_RELID_LIST_FIELD(fldname) \
 +      do { \
 +              token = pg_strtok(&length);             /* skip :fldname */ \
 +              token = pg_strtok(&length);     /* skip '(' */ \
 +              if (length > 0 ) \
 +              { \
 +                      Assert(token[0] == '('); \
 +                      for (;;) \
 +                      { \
 +                              Oid relid; \
 +                              READ_RELID_INTERNAL(relid, true); \
 +                              local_node->fldname = lappend_oid(local_node->fldname, relid); \
 +                              token = pg_strtok(&length); \
 +                              if (token[0] == ')') \
 +                              break; \
 +                      } \
 +              } \
 +              else \
 +                      local_node->fldname = NIL; \
 +      } while (0)
 +
 +/* Read data type identifier and lookup the OID */
 +#define READ_TYPID_FIELD(fldname) \
 +      do { \
 +              char       *nspname; /* namespace name */ \
 +              char       *typname; /* data type name */ \
 +              token = pg_strtok(&length);             /* skip :fldname */ \
 +              token = pg_strtok(&length); /* get nspname */ \
 +              nspname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* get typname */ \
 +              typname = nullable_string(token, length); \
 +              if (typname) \
 +                      local_node->fldname = get_typname_typid(typname, \
 +                                                                                                      NSP_OID(nspname)); \
 +              else \
 +                      local_node->fldname = InvalidOid; \
 +      } while (0)
 +
 +/* Read function identifier and lookup the OID */
 +#define READ_FUNCID_FIELD(fldname) \
 +      do { \
 +              char       *nspname; /* namespace name */ \
 +              char       *funcname; /* function name */ \
 +              int             nargs; /* number of arguments */ \
 +              Oid                *argtypes; /* argument types */ \
 +              token = pg_strtok(&length);             /* skip :fldname */ \
 +              token = pg_strtok(&length); /* get nspname */ \
 +              nspname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* get funcname */ \
 +              funcname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* get nargs */ \
 +              nargs = atoi(token); \
 +              if (funcname) \
 +              { \
 +                      int     i; \
 +                      argtypes = palloc(nargs * sizeof(Oid)); \
 +                      for (i = 0; i < nargs; i++) \
 +                      { \
 +                              char *typnspname; /* argument type namespace */ \
 +                              char *typname; /* argument type name */ \
 +                              token = pg_strtok(&length); /* get type nspname */ \
 +                              typnspname = nullable_string(token, length); \
 +                              token = pg_strtok(&length); /* get type name */ \
 +                              typname = nullable_string(token, length); \
 +                              argtypes[i] = get_typname_typid(typname, \
 +                                                                                              NSP_OID(typnspname)); \
 +                      } \
 +                      local_node->fldname = get_funcid(funcname, \
 +                                                                                       buildoidvector(argtypes, nargs), \
 +                                                                                       NSP_OID(nspname)); \
 +              } \
 +              else \
 +                      local_node->fldname = InvalidOid; \
 +      } while (0)
 +
 +/* Read operator identifier and lookup the OID */
 +#define READ_OPERID_FIELD(fldname) \
 +      do { \
 +              char       *nspname; /* namespace name */ \
 +              char       *oprname; /* operator name */ \
 +              char       *leftnspname; /* left type namespace */ \
 +              char       *leftname; /* left type name */ \
 +              Oid                     oprleft; /* left type */ \
 +              char       *rightnspname; /* right type namespace */ \
 +              char       *rightname; /* right type name */ \
 +              Oid                     oprright; /* right type */ \
 +              token = pg_strtok(&length);             /* skip :fldname */ \
 +              token = pg_strtok(&length); /* get nspname */ \
 +              nspname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* get operator name */ \
 +              oprname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* left type namespace */ \
 +              leftnspname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* left type name */ \
 +              leftname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* right type namespace */ \
 +              rightnspname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* right type name */ \
 +              rightname = nullable_string(token, length); \
 +              if (oprname) \
 +              { \
 +                      if (leftname) \
 +                              oprleft = get_typname_typid(leftname, \
 +                                                                                      NSP_OID(leftnspname)); \
 +                      else \
 +                              oprleft = InvalidOid; \
 +                      if (rightname) \
 +                              oprright = get_typname_typid(rightname, \
 +                                                                                       NSP_OID(rightnspname)); \
 +                      else \
 +                              oprright = InvalidOid; \
 +                      local_node->fldname = get_operid(oprname, \
 +                                                                                       oprleft, \
 +                                                                                       oprright, \
 +                                                                                       NSP_OID(nspname)); \
 +              } \
 +              else \
 +                      local_node->fldname = InvalidOid; \
 +      } while (0)
 +
 +/* Read collation identifier and lookup the OID */
 +#define READ_COLLID_FIELD(fldname) \
 +      do { \
 +              char       *nspname; /* namespace name */ \
 +              char       *collname; /* collation name */ \
 +              int             collencoding; /* collation encoding */ \
 +              token = pg_strtok(&length);             /* skip :fldname */ \
 +              token = pg_strtok(&length); /* get nspname */ \
 +              nspname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* get collname */ \
 +              collname = nullable_string(token, length); \
 +              token = pg_strtok(&length); /* get collencoding */ \
 +              collencoding = atoi(token); \
 +              if (collname) \
 +                      local_node->fldname = get_collid(collname, \
 +                                                                                       collencoding, \
 +                                                                                       NSP_OID(nspname)); \
 +              else \
 +                      local_node->fldname = InvalidOid; \
 +      } while (0)
 +#endif
+ /* Read an attribute number array */
+ #define READ_ATTRNUMBER_ARRAY(fldname, len) \
+       token = pg_strtok(&length);             /* skip :fldname */ \
+       local_node->fldname = readAttrNumberCols(len);
+ /* Read an oid array */
+ #define READ_OID_ARRAY(fldname, len) \
+       token = pg_strtok(&length);             /* skip :fldname */ \
+       local_node->fldname = readOidCols(len);
+ /* Read an int array */
+ #define READ_INT_ARRAY(fldname, len) \
+       token = pg_strtok(&length);             /* skip :fldname */ \
+       local_node->fldname = readIntCols(len);
+ /* Read a bool array */
+ #define READ_BOOL_ARRAY(fldname, len) \
+       token = pg_strtok(&length);             /* skip :fldname */ \
+       local_node->fldname = readBoolCols(len);
  
  /* Routine exit */
  #define READ_DONE() \
        ((length) == 0 ? NULL : debackslash(token, length))
  
  
 +#ifdef XCP
 +static Datum scanDatum(Oid typid, int typmod);
 +#endif
 +
  /*
   * _readBitmapset
   */
@@@ -846,30 -544,12 +888,32 @@@ _readAggref(void
  {
        READ_LOCALS(Aggref);
  
 +#ifdef XCP
 +      if (portable_input)
 +              READ_FUNCID_FIELD(aggfnoid);
 +      else
 +#endif
        READ_OID_FIELD(aggfnoid);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_TYPID_FIELD(aggtype);
 +      else
 +#endif
        READ_OID_FIELD(aggtype);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_COLLID_FIELD(aggcollid);
 +      else
 +#endif
        READ_OID_FIELD(aggcollid);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_COLLID_FIELD(inputcollid);
 +      else
 +#endif
        READ_OID_FIELD(inputcollid);
+       READ_OID_FIELD(aggtranstype);
+       READ_NODE_FIELD(aggargtypes);
        READ_NODE_FIELD(aggdirectargs);
        READ_NODE_FIELD(args);
        READ_NODE_FIELD(aggorder);
@@@ -1043,37 -669,11 +1088,40 @@@ _readOpExpr(void
  {
        READ_LOCALS(OpExpr);
  
 +#ifdef XCP
 +      if (portable_input)
 +              READ_OPERID_FIELD(opno);
 +      else
 +#endif
        READ_OID_FIELD(opno);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_FUNCID_FIELD(opfuncid);
 +      else
 +#endif
        READ_OID_FIELD(opfuncid);
 +
 +#ifdef XCP
 +      if (portable_input)
 +              READ_TYPID_FIELD(opresulttype);
 +      else
 +#endif
        READ_OID_FIELD(opresulttype);
        READ_BOOL_FIELD(opretset);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_COLLID_FIELD(opcollid);
 +      else
 +#endif
        READ_OID_FIELD(opcollid);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_COLLID_FIELD(inputcollid);
 +      else
 +#endif
        READ_OID_FIELD(inputcollid);
        READ_NODE_FIELD(args);
        READ_LOCATION_FIELD(location);
@@@ -1089,37 -689,11 +1137,40 @@@ _readDistinctExpr(void
  {
        READ_LOCALS(DistinctExpr);
  
 +#ifdef XCP
 +      if (portable_input)
 +              READ_OPERID_FIELD(opno);
 +      else
 +#endif
        READ_OID_FIELD(opno);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_FUNCID_FIELD(opfuncid);
 +      else
 +#endif
        READ_OID_FIELD(opfuncid);
 +
 +#ifdef XCP
 +      if (portable_input)
 +              READ_TYPID_FIELD(opresulttype);
 +      else
 +#endif
        READ_OID_FIELD(opresulttype);
        READ_BOOL_FIELD(opretset);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_COLLID_FIELD(opcollid);
 +      else
 +#endif
        READ_OID_FIELD(opcollid);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_COLLID_FIELD(inputcollid);
 +      else
 +#endif
        READ_OID_FIELD(inputcollid);
        READ_NODE_FIELD(args);
        READ_LOCATION_FIELD(location);
@@@ -1135,51 -709,11 +1186,54 @@@ _readNullIfExpr(void
  {
        READ_LOCALS(NullIfExpr);
  
 +#ifdef XCP
 +      if (portable_input)
 +              READ_OPERID_FIELD(opno);
 +      else
 +#endif
        READ_OID_FIELD(opno);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_FUNCID_FIELD(opfuncid);
 +      else
 +#endif
        READ_OID_FIELD(opfuncid);
 +
 +      /*
 +       * The opfuncid is stored in the textual format primarily for debugging
 +       * and documentation reasons.  We want to always read it as zero to force
 +       * it to be re-looked-up in the pg_operator entry.  This ensures that
 +       * stored rules don't have hidden dependencies on operators' functions.
 +       * (We don't currently support an ALTER OPERATOR command, but might
 +       * someday.)
 +       */
 +#ifdef XCP
 +      /* Do not invalidate if we have just looked up the value */
 +      if (!portable_input)
 +#endif
 +      local_node->opfuncid = InvalidOid;
 +
 +#ifdef XCP
 +      if (portable_input)
 +              READ_TYPID_FIELD(opresulttype);
 +      else
 +#endif
        READ_OID_FIELD(opresulttype);
        READ_BOOL_FIELD(opretset);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_COLLID_FIELD(opcollid);
 +      else
 +#endif
        READ_OID_FIELD(opcollid);
 +#ifdef XCP
 +      if (portable_input)
 +              READ_COLLID_FIELD(inputcollid);
 +      else
 +#endif
        READ_OID_FIELD(inputcollid);
        READ_NODE_FIELD(args);
        READ_LOCATION_FIELD(location);
@@@ -2051,27 -1353,84 +2105,32 @@@ _readTableSampleClause(void
  {
        READ_LOCALS(TableSampleClause);
  
 +#ifdef XCP
 +      if (portable_input)
 +      {
 +              READ_FUNCID_FIELD(tsmhandler);
 +      }
 +      else
 +      {
 +#endif
 +      READ_OID_FIELD(tsmhandler);
 +#ifdef XCP
 +      }
 +#endif
        READ_NODE_FIELD(args);
        READ_NODE_FIELD(repeatable);
  
        READ_DONE();
  }
  
 -/*
 - * _readDefElem
 - */
 -static DefElem *
 -_readDefElem(void)
 -{
 -      READ_LOCALS(DefElem);
 -
 -      READ_STRING_FIELD(defnamespace);
 -      READ_STRING_FIELD(defname);
 -      READ_NODE_FIELD(arg);
 -      READ_ENUM_FIELD(defaction, DefElemAction);
 -
 -      READ_DONE();
 -}
 -
 -/*
 - * _readPlannedStmt
 - */
 -static PlannedStmt *
 -_readPlannedStmt(void)
 -{
 -      READ_LOCALS(PlannedStmt);
 -
 -      READ_ENUM_FIELD(commandType, CmdType);
 -      READ_UINT_FIELD(queryId);
 -      READ_BOOL_FIELD(hasReturning);
 -      READ_BOOL_FIELD(hasModifyingCTE);
 -      READ_BOOL_FIELD(canSetTag);
 -      READ_BOOL_FIELD(transientPlan);
 -      READ_BOOL_FIELD(dependsOnRole);
 -      READ_BOOL_FIELD(parallelModeNeeded);
 -      READ_NODE_FIELD(planTree);
 -      READ_NODE_FIELD(rtable);
 -      READ_NODE_FIELD(resultRelations);
 -      READ_NODE_FIELD(utilityStmt);
 -      READ_NODE_FIELD(subplans);
 -      READ_BITMAPSET_FIELD(rewindPlanIDs);
 -      READ_NODE_FIELD(rowMarks);
 -      READ_NODE_FIELD(relationOids);
 -      READ_NODE_FIELD(invalItems);
 -      READ_INT_FIELD(nParamExec);
 -
 -      READ_DONE();
 -}
 -
 -/*
 - * ReadCommonPlan
 - *    Assign the basic stuff of all nodes that inherit from Plan
 - */
 -static void
 -ReadCommonPlan(Plan *local_node)
 -{
 -      READ_TEMP_LOCALS();
 -
 -      READ_FLOAT_FIELD(startup_cost);
 -      READ_FLOAT_FIELD(total_cost);
 -      READ_FLOAT_FIELD(plan_rows);
 -      READ_INT_FIELD(plan_width);
 -      READ_BOOL_FIELD(parallel_aware);
 -      READ_INT_FIELD(plan_node_id);
 -      READ_NODE_FIELD(targetlist);
 -      READ_NODE_FIELD(qual);
 -      READ_NODE_FIELD(lefttree);
 -      READ_NODE_FIELD(righttree);
 -      READ_NODE_FIELD(initPlan);
 -      READ_BITMAPSET_FIELD(extParam);
 -      READ_BITMAPSET_FIELD(allParam);
 -}
  
 +#ifdef XCP
  /*
   * _readPlan
   */
  static Plan *
@@@ -2263,1486 -1537,1108 +2322,2706 @@@ _readRecursiveUnion(void
  
        READ_INT_FIELD(wtParam);
        READ_INT_FIELD(numCols);
 -      READ_ATTRNUMBER_ARRAY(dupColIdx, local_node->numCols);
 -      READ_OID_ARRAY(dupOperators, local_node->numCols);
 -      READ_LONG_FIELD(numGroups);
 +
 +      token = pg_strtok(&length);             /* skip :dupColIdx */
 +      local_node->dupColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->dupColIdx[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :dupOperators */
 +      local_node->dupOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->dupOperators[i] = atooid(token);
 +      }
 +
 +      READ_LONG_FIELD(numGroups);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readBitmapAnd
 + */
 +static BitmapAnd *
 +_readBitmapAnd(void)
 +{
 +      READ_PLAN_FIELDS(BitmapAnd);
 +
 +      READ_NODE_FIELD(bitmapplans);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readBitmapOr
 + */
 +static BitmapOr *
 +_readBitmapOr(void)
 +{
 +      READ_PLAN_FIELDS(BitmapOr);
 +
 +      READ_NODE_FIELD(bitmapplans);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readScan
 + */
 +static Scan *
 +_readScan(void)
 +{
 +      READ_SCAN_FIELDS(Scan);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readSeqScan
 + */
 +static SeqScan *
 +_readSeqScan(void)
 +{
 +      READ_SCAN_FIELDS(SeqScan);
 +
 +      READ_DONE();
 +}
 +
 +/*
 + * _readSampleScan
 + */
 +static SampleScan *
 +_readSampleScan(void)
 +{
 +      READ_SCAN_FIELDS(SampleScan);
 +      READ_NODE_FIELD(tablesample);
 +
 +      READ_DONE();
 +}
 +
 +/*
 + * _readIndexScan
 + */
 +static IndexScan *
 +_readIndexScan(void)
 +{
 +      READ_SCAN_FIELDS(IndexScan);
 +
 +      if (portable_input)
 +              READ_RELID_FIELD(indexid);
 +      else
 +              READ_OID_FIELD(indexid);
 +      READ_NODE_FIELD(indexqual);
 +      READ_NODE_FIELD(indexqualorig);
 +      READ_NODE_FIELD(indexorderby);
 +      READ_NODE_FIELD(indexorderbyorig);
 +      READ_NODE_FIELD(indexorderbyops);
 +      READ_ENUM_FIELD(indexorderdir, ScanDirection);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readIndexOnlyScan
 + */
 +static IndexOnlyScan *
 +_readIndexOnlyScan(void)
 +{
 +      READ_SCAN_FIELDS(IndexOnlyScan);
 +
 +      if (portable_input)
 +              READ_RELID_FIELD(indexid);
 +      else
 +              READ_OID_FIELD(indexid);
 +      READ_NODE_FIELD(indexqual);
 +      READ_NODE_FIELD(indexorderby);
 +      READ_NODE_FIELD(indextlist);
 +      READ_ENUM_FIELD(indexorderdir, ScanDirection);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readBitmapIndexScan
 + */
 +static BitmapIndexScan *
 +_readBitmapIndexScan(void)
 +{
 +      READ_SCAN_FIELDS(BitmapIndexScan);
 +
 +      if (portable_input)
 +              READ_RELID_FIELD(indexid);
 +      else
 +              READ_OID_FIELD(indexid);
 +      READ_NODE_FIELD(indexqual);
 +      READ_NODE_FIELD(indexqualorig);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readBitmapHeapScan
 + */
 +static BitmapHeapScan *
 +_readBitmapHeapScan(void)
 +{
 +      READ_SCAN_FIELDS(BitmapHeapScan);
 +
 +      READ_NODE_FIELD(bitmapqualorig);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readTidScan
 + */
 +static TidScan *
 +_readTidScan(void)
 +{
 +      READ_SCAN_FIELDS(TidScan);
 +
 +      READ_NODE_FIELD(tidquals);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readSubqueryScan
 + */
 +static SubqueryScan *
 +_readSubqueryScan(void)
 +{
 +      READ_SCAN_FIELDS(SubqueryScan);
 +
 +      READ_NODE_FIELD(subplan);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readFunctionScan
 + */
 +static FunctionScan *
 +_readFunctionScan(void)
 +{
 +      READ_SCAN_FIELDS(FunctionScan);
 +
 +      READ_NODE_FIELD(functions);
 +      READ_BOOL_FIELD(funcordinality);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readValuesScan
 + */
 +static ValuesScan *
 +_readValuesScan(void)
 +{
 +      READ_SCAN_FIELDS(ValuesScan);
 +
 +      READ_NODE_FIELD(values_lists);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readCteScan
 + */
 +static CteScan *
 +_readCteScan(void)
 +{
 +      READ_SCAN_FIELDS(CteScan);
 +
 +      READ_INT_FIELD(ctePlanId);
 +      READ_INT_FIELD(cteParam);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readWorkTableScan
 + */
 +static WorkTableScan *
 +_readWorkTableScan(void)
 +{
 +      READ_SCAN_FIELDS(WorkTableScan);
 +
 +      READ_INT_FIELD(wtParam);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readJoin
 + */
 +static Join *
 +_readJoin(void)
 +{
 +      READ_JOIN_FIELDS(Join);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readNestLoop
 + */
 +static NestLoop *
 +_readNestLoop(void)
 +{
 +      READ_JOIN_FIELDS(NestLoop);
 +
 +      READ_NODE_FIELD(nestParams);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readMergeJoin
 + */
 +static MergeJoin *
 +_readMergeJoin(void)
 +{
 +      int                     numCols;
 +      int                     i;
 +      READ_JOIN_FIELDS(MergeJoin);
 +
 +      READ_NODE_FIELD(mergeclauses);
 +      numCols = list_length(local_node->mergeclauses);
 +
 +
 +      token = pg_strtok(&length);             /* skip :mergeFamilies */
 +      local_node->mergeFamilies = (Oid *) palloc(numCols * sizeof(Oid));
 +      for (i = 0; i < numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->mergeFamilies[i] = atooid(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :mergeCollations */
 +      local_node->mergeCollations = (Oid *) palloc(numCols * sizeof(Oid));
 +      for (i = 0; i < numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *collname; /* collation name */
 +                      int             collencoding; /* collation encoding */
 +                      /* the token is already read */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get collname */
 +                      collname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get collencoding */
 +                      collencoding = atoi(token);
 +                      if (collname)
 +                              local_node->mergeCollations[i] = get_collid(collname,
 +                                                                                                                      collencoding,
 +                                                                                                                      NSP_OID(nspname));
 +                      else
 +                              local_node->mergeCollations[i] = InvalidOid;
 +              }
 +              else
 +                      local_node->mergeCollations[i] = atooid(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :mergeStrategies */
 +      local_node->mergeStrategies = (int *) palloc(numCols * sizeof(int));
 +      for (i = 0; i < numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->mergeStrategies[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :mergeNullsFirst */
 +      local_node->mergeNullsFirst = (bool *) palloc(numCols * sizeof(bool));
 +      for (i = 0; i < numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->mergeNullsFirst[i] = strtobool(token);
 +      }
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readHashJoin
 + */
 +static HashJoin *
 +_readHashJoin(void)
 +{
 +      READ_JOIN_FIELDS(HashJoin);
 +
 +      READ_NODE_FIELD(hashclauses);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readMaterial
 + */
 +static Material *
 +_readMaterial(void)
 +{
 +      READ_PLAN_FIELDS(Material);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readSort
 + */
 +static Sort *
 +_readSort(void)
 +{
 +      int i;
 +      READ_PLAN_FIELDS(Sort);
 +
 +      READ_INT_FIELD(numCols);
 +
 +      token = pg_strtok(&length);             /* skip :sortColIdx */
 +      local_node->sortColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->sortColIdx[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :sortOperators */
 +      local_node->sortOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *oprname; /* operator name */
 +                      char       *leftnspname; /* left type namespace */
 +                      char       *leftname; /* left type name */
 +                      Oid                     oprleft; /* left type */
 +                      char       *rightnspname; /* right type namespace */
 +                      char       *rightname; /* right type name */
 +                      Oid                     oprright; /* right type */
 +                      /* token is already set to nspname */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get operator name */
 +                      oprname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type namespace */
 +                      leftnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type name */
 +                      leftname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type namespace */
 +                      rightnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type name */
 +                      rightname = nullable_string(token, length);
 +                      if (leftname)
 +                              oprleft = get_typname_typid(leftname,
 +                                                                                      NSP_OID(leftnspname));
 +                      else
 +                              oprleft = InvalidOid;
 +                      if (rightname)
 +                              oprright = get_typname_typid(rightname,
 +                                                                                       NSP_OID(rightnspname));
 +                      else
 +                              oprright = InvalidOid;
 +                      local_node->sortOperators[i] = get_operid(oprname,
 +                                                                                                        oprleft,
 +                                                                                                        oprright,
 +                                                                                                        NSP_OID(nspname));
 +              }
 +              else
 +                      local_node->sortOperators[i] = atooid(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :collations */
 +      local_node->collations = (Oid *) palloc(local_node->numCols * sizeof(Oid));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *collname; /* collation name */
 +                      int             collencoding; /* collation encoding */
 +                      /* the token is already read */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get collname */
 +                      collname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get collencoding */
 +                      collencoding = atoi(token);
 +                      if (collname)
 +                              local_node->collations[i] = get_collid(collname,
 +                                                                                                         collencoding,
 +                                                                                                         NSP_OID(nspname));
 +                      else
 +                              local_node->collations[i] = InvalidOid;
 +              }
 +              else
 +                      local_node->collations[i] = atooid(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :nullsFirst */
 +      local_node->nullsFirst = (bool *) palloc(local_node->numCols * sizeof(bool));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->nullsFirst[i] = strtobool(token);
 +      }
 +
 +      READ_DONE();
 +}
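The portable_input branches above trade raw OIDs for catalog names, so a serialized Sort (or MergeJoin, Group, Agg, ...) can be re-read on a node whose OID assignments differ: each operator is written as six tokens (operator namespace and name, then namespace and name of the left and right argument types) and resolved back with get_typname_typid() and get_operid(). A minimal sketch of a helper that consumes that token sequence; the helper itself is hypothetical (the file keeps the logic inline, and the inline code has already consumed the first of the six tokens before branching):

    /* Hypothetical helper, not part of this patch. */
    static Oid
    read_portable_operid(void)
    {
        char       *nspname, *oprname;
        char       *leftnspname, *leftname, *rightnspname, *rightname;
        Oid         oprleft, oprright;
        READ_TEMP_LOCALS();

        token = pg_strtok(&length);     /* operator namespace */
        nspname = nullable_string(token, length);
        token = pg_strtok(&length);     /* operator name */
        oprname = nullable_string(token, length);
        token = pg_strtok(&length);     /* left type namespace */
        leftnspname = nullable_string(token, length);
        token = pg_strtok(&length);     /* left type name */
        leftname = nullable_string(token, length);
        token = pg_strtok(&length);     /* right type namespace */
        rightnspname = nullable_string(token, length);
        token = pg_strtok(&length);     /* right type name */
        rightname = nullable_string(token, length);
        oprleft = leftname ? get_typname_typid(leftname, NSP_OID(leftnspname)) : InvalidOid;
        oprright = rightname ? get_typname_typid(rightname, NSP_OID(rightnspname)) : InvalidOid;
        return get_operid(oprname, oprleft, oprright, NSP_OID(nspname));
    }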
 +
 +
 +/*
 + * _readGroup
 + */
 +static Group *
 +_readGroup(void)
 +{
 +      int i;
 +      READ_PLAN_FIELDS(Group);
 +
 +      READ_INT_FIELD(numCols);
 +
 +      token = pg_strtok(&length);             /* skip :grpColIdx */
 +      local_node->grpColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->grpColIdx[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :grpOperators */
 +      local_node->grpOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *oprname; /* operator name */
 +                      char       *leftnspname; /* left type namespace */
 +                      char       *leftname; /* left type name */
 +                      Oid                     oprleft; /* left type */
 +                      char       *rightnspname; /* right type namespace */
 +                      char       *rightname; /* right type name */
 +                      Oid                     oprright; /* right type */
 +                      /* token is already set to nspname */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get operator name */
 +                      oprname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type namespace */
 +                      leftnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type name */
 +                      leftname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type namespace */
 +                      rightnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type name */
 +                      rightname = nullable_string(token, length);
 +                      if (leftname)
 +                              oprleft = get_typname_typid(leftname,
 +                                                                                      NSP_OID(leftnspname));
 +                      else
 +                              oprleft = InvalidOid;
 +                      if (rightname)
 +                              oprright = get_typname_typid(rightname,
 +                                                                                       NSP_OID(rightnspname));
 +                      else
 +                              oprright = InvalidOid;
 +                      local_node->grpOperators[i] = get_operid(oprname,
 +                                                                                                       oprleft,
 +                                                                                                       oprright,
 +                                                                                                       NSP_OID(nspname));
 +              }
 +              else
 +                      local_node->grpOperators[i] = atooid(token);
 +      }
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readAgg
 + */
 +static Agg *
 +_readAgg(void)
 +{
 +      int i;
 +      READ_PLAN_FIELDS(Agg);
 +
 +      READ_ENUM_FIELD(aggstrategy, AggStrategy);
 +      READ_ENUM_FIELD(aggdistribution, AggDistribution);
 +      READ_INT_FIELD(numCols);
 +
 +      token = pg_strtok(&length);             /* skip :grpColIdx */
 +      local_node->grpColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->grpColIdx[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :grpOperators */
 +      local_node->grpOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *oprname; /* operator name */
 +                      char       *leftnspname; /* left type namespace */
 +                      char       *leftname; /* left type name */
 +                      Oid                     oprleft; /* left type */
 +                      char       *rightnspname; /* right type namespace */
 +                      char       *rightname; /* right type name */
 +                      Oid                     oprright; /* right type */
 +                      /* token is already set to nspname */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get operator name */
 +                      oprname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type namespace */
 +                      leftnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type name */
 +                      leftname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type namespace */
 +                      rightnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type name */
 +                      rightname = nullable_string(token, length);
 +                      if (leftname)
 +                              oprleft = get_typname_typid(leftname,
 +                                                                                      NSP_OID(leftnspname));
 +                      else
 +                              oprleft = InvalidOid;
 +                      if (rightname)
 +                              oprright = get_typname_typid(rightname,
 +                                                                                       NSP_OID(rightnspname));
 +                      else
 +                              oprright = InvalidOid;
 +                      local_node->grpOperators[i] = get_operid(oprname,
 +                                                                                                       oprleft,
 +                                                                                                       oprright,
 +                                                                                                       NSP_OID(nspname));
 +              }
 +              else
 +                      local_node->grpOperators[i] = atooid(token);
 +      }
 +
 +      READ_LONG_FIELD(numGroups);
 +
 +      READ_NODE_FIELD(groupingSets);
 +      READ_NODE_FIELD(chain);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readWindowAgg
 + */
 +static WindowAgg *
 +_readWindowAgg(void)
 +{
 +      int i;
 +      READ_PLAN_FIELDS(WindowAgg);
 +
 +      READ_INT_FIELD(winref);
 +      READ_INT_FIELD(partNumCols);
 +
 +      token = pg_strtok(&length);             /* skip :partColIdx */
 +      local_node->partColIdx = (AttrNumber *) palloc(local_node->partNumCols * sizeof(AttrNumber));
 +      for (i = 0; i < local_node->partNumCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->partColIdx[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :partOperators */
 +      local_node->partOperators = (Oid *) palloc(local_node->partNumCols * sizeof(Oid));
 +      for (i = 0; i < local_node->partNumCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *oprname; /* operator name */
 +                      char       *leftnspname; /* left type namespace */
 +                      char       *leftname; /* left type name */
 +                      Oid                     oprleft; /* left type */
 +                      char       *rightnspname; /* right type namespace */
 +                      char       *rightname; /* right type name */
 +                      Oid                     oprright; /* right type */
 +                      /* token is already set to nspname */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get operator name */
 +                      oprname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type namespace */
 +                      leftnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type name */
 +                      leftname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type namespace */
 +                      rightnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type name */
 +                      rightname = nullable_string(token, length);
 +                      if (leftname)
 +                              oprleft = get_typname_typid(leftname,
 +                                                                                      NSP_OID(leftnspname));
 +                      else
 +                              oprleft = InvalidOid;
 +                      if (rightname)
 +                              oprright = get_typname_typid(rightname,
 +                                                                                       NSP_OID(rightnspname));
 +                      else
 +                              oprright = InvalidOid;
 +                      local_node->partOperators[i] = get_operid(oprname,
 +                                                                                                        oprleft,
 +                                                                                                        oprright,
 +                                                                                                        NSP_OID(nspname));
 +              }
 +              else
 +                      local_node->partOperators[i] = atooid(token);
 +      }
 +
 +      READ_INT_FIELD(ordNumCols);
 +
 +      token = pg_strtok(&length);             /* skip :ordColIdx */
 +      local_node->ordColIdx = (AttrNumber *) palloc(local_node->ordNumCols * sizeof(AttrNumber));
 +      for (i = 0; i < local_node->ordNumCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->ordColIdx[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :ordOperators */
 +      local_node->ordOperators = (Oid *) palloc(local_node->ordNumCols * sizeof(Oid));
 +      for (i = 0; i < local_node->ordNumCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *oprname; /* operator name */
 +                      char       *leftnspname; /* left type namespace */
 +                      char       *leftname; /* left type name */
 +                      Oid                     oprleft; /* left type */
 +                      char       *rightnspname; /* right type namespace */
 +                      char       *rightname; /* right type name */
 +                      Oid                     oprright; /* right type */
 +                      /* token is already set to nspname */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get operator name */
 +                      oprname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type namespace */
 +                      leftnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type name */
 +                      leftname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type namespace */
 +                      rightnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type name */
 +                      rightname = nullable_string(token, length);
 +                      if (leftname)
 +                              oprleft = get_typname_typid(leftname,
 +                                                                                      NSP_OID(leftnspname));
 +                      else
 +                              oprleft = InvalidOid;
 +                      if (rightname)
 +                              oprright = get_typname_typid(rightname,
 +                                                                                       NSP_OID(rightnspname));
 +                      else
 +                              oprright = InvalidOid;
 +                      local_node->ordOperators[i] = get_operid(oprname,
 +                                                                                                       oprleft,
 +                                                                                                       oprright,
 +                                                                                                       NSP_OID(nspname));
 +              }
 +              else
 +                      local_node->ordOperators[i] = atooid(token);
 +      }
 +
 +      READ_INT_FIELD(frameOptions);
 +      READ_NODE_FIELD(startOffset);
 +      READ_NODE_FIELD(endOffset);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readUnique
 + */
 +static Unique *
 +_readUnique(void)
 +{
 +      int i;
 +      READ_PLAN_FIELDS(Unique);
 +
 +      READ_INT_FIELD(numCols);
 +
 +      token = pg_strtok(&length);             /* skip :uniqColIdx */
 +      local_node->uniqColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->uniqColIdx[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :uniqOperators */
 +      local_node->uniqOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *oprname; /* operator name */
 +                      char       *leftnspname; /* left type namespace */
 +                      char       *leftname; /* left type name */
 +                      Oid                     oprleft; /* left type */
 +                      char       *rightnspname; /* right type namespace */
 +                      char       *rightname; /* right type name */
 +                      Oid                     oprright; /* right type */
 +                      /* token is already set to nspname */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get operator name */
 +                      oprname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type namespace */
 +                      leftnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type name */
 +                      leftname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type namespace */
 +                      rightnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type name */
 +                      rightname = nullable_string(token, length);
 +                      if (leftname)
 +                              oprleft = get_typname_typid(leftname,
 +                                                                                      NSP_OID(leftnspname));
 +                      else
 +                              oprleft = InvalidOid;
 +                      if (rightname)
 +                              oprright = get_typname_typid(rightname,
 +                                                                                       NSP_OID(rightnspname));
 +                      else
 +                              oprright = InvalidOid;
 +                      local_node->uniqOperators[i] = get_operid(oprname,
 +                                                                                                        oprleft,
 +                                                                                                        oprright,
 +                                                                                                        NSP_OID(nspname));
 +              }
 +              else
 +                      local_node->uniqOperators[i] = atooid(token);
 +      }
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readHash
 + */
 +static Hash *
 +_readHash(void)
 +{
 +      READ_PLAN_FIELDS(Hash);
 +
 +      if (portable_input)
 +              READ_RELID_FIELD(skewTable);
 +      else
 +              READ_OID_FIELD(skewTable);
 +      READ_INT_FIELD(skewColumn);
 +      READ_BOOL_FIELD(skewInherit);
 +      if (portable_input)
 +              READ_TYPID_FIELD(skewColType);
 +      else
 +              READ_OID_FIELD(skewColType);
 +      READ_INT_FIELD(skewColTypmod);
 +
 +      READ_DONE();
 +}
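READ_RELID_FIELD and READ_TYPID_FIELD above are the macro-level form of the same portable/non-portable split: under portable_input the skew table and skew column type are written as (namespace, name) pairs and looked up again on the receiving node, while the READ_OID_FIELD path keeps the numeric OID. Spelled out for skewColType without the macro, the portable path is expected to behave roughly like the paramtype handling in _readRemoteStmt below (a sketch, not the actual macro definition):

    token = pg_strtok(&length);         /* skip :skewColType */
    token = pg_strtok(&length);         /* type namespace */
    nspname = nullable_string(token, length);
    token = pg_strtok(&length);         /* type name */
    typname = nullable_string(token, length);
    local_node->skewColType = typname
        ? get_typname_typid(typname, NSP_OID(nspname))
        : InvalidOid;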
 +
 +
 +/*
 + * _readSetOp
 + */
 +static SetOp *
 +_readSetOp(void)
 +{
 +      int i;
 +      READ_PLAN_FIELDS(SetOp);
 +
 +      READ_ENUM_FIELD(cmd, SetOpCmd);
 +      READ_ENUM_FIELD(strategy, SetOpStrategy);
 +      READ_INT_FIELD(numCols);
 +
 +      token = pg_strtok(&length);             /* skip :dupColIdx */
 +      local_node->dupColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->dupColIdx[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :dupOperators */
 +      local_node->dupOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->dupOperators[i] = atooid(token);
 +      }
 +
 +      READ_INT_FIELD(flagColIdx);
 +      READ_INT_FIELD(firstFlag);
 +      READ_LONG_FIELD(numGroups);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readLimit
 + */
 +static Limit *
 +_readLimit(void)
 +{
 +      READ_PLAN_FIELDS(Limit);
 +
 +      READ_NODE_FIELD(limitOffset);
 +      READ_NODE_FIELD(limitCount);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readRemoteSubplan
 + */
 +static RemoteSubplan *
 +_readRemoteSubplan(void)
 +{
 +      READ_SCAN_FIELDS(RemoteSubplan);
 +
 +      READ_CHAR_FIELD(distributionType);
 +      READ_INT_FIELD(distributionKey);
 +      READ_NODE_FIELD(distributionNodes);
 +      READ_NODE_FIELD(distributionRestrict);
 +      READ_NODE_FIELD(nodeList);
 +      READ_BOOL_FIELD(execOnAll);
 +      READ_NODE_FIELD(sort);
 +      READ_STRING_FIELD(cursor);
 +      READ_INT_FIELD(unique);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readRemoteStmt
 + */
 +static RemoteStmt *
 +_readRemoteStmt(void)
 +{
 +      int i;
 +      READ_LOCALS(RemoteStmt);
 +
 +      READ_ENUM_FIELD(commandType, CmdType);
 +      READ_BOOL_FIELD(hasReturning);
 +      READ_NODE_FIELD(planTree);
 +      READ_NODE_FIELD(rtable);
 +      READ_NODE_FIELD(resultRelations);
 +      READ_NODE_FIELD(subplans);
 +      READ_INT_FIELD(nParamExec);
 +      READ_INT_FIELD(nParamRemote);
 +      if (local_node->nParamRemote > 0)
 +      {
 +              local_node->remoteparams = (RemoteParam *) palloc(
 +                              local_node->nParamRemote * sizeof(RemoteParam));
 +              for (i = 0; i < local_node->nParamRemote; i++)
 +              {
 +                      RemoteParam *rparam = &(local_node->remoteparams[i]);
 +                      token = pg_strtok(&length); /* skip  :paramkind */
 +                      token = pg_strtok(&length);
 +                      rparam->paramkind = (ParamKind) atoi(token);
 +
 +                      token = pg_strtok(&length); /* skip  :paramid */
 +                      token = pg_strtok(&length);
 +                      rparam->paramid = atoi(token);
 +
 +                      token = pg_strtok(&length); /* skip  :paramused */
 +                      token = pg_strtok(&length);
 +                      rparam->paramused = atoi(token);
 +
 +                      token = pg_strtok(&length); /* skip  :paramtype */
 +                      if (portable_input)
 +                      {
 +                              char       *nspname; /* namespace name */
 +                              char       *typname; /* data type name */
 +                              token = pg_strtok(&length); /* get nspname */
 +                              nspname = nullable_string(token, length);
 +                              token = pg_strtok(&length); /* get typname */
 +                              typname = nullable_string(token, length);
 +                              if (typname)
 +                                      rparam->paramtype = get_typname_typid(typname,
 +                                                                                                                NSP_OID(nspname));
 +                              else
 +                                      rparam->paramtype = InvalidOid;
 +                      }
 +                      else
 +                      {
 +                              token = pg_strtok(&length);
 +                              rparam->paramtype = atooid(token);
 +                      }
 +              }
 +      }
 +      else
 +              local_node->remoteparams = NULL;
 +
 +      READ_NODE_FIELD(rowMarks);
 +      READ_CHAR_FIELD(distributionType);
 +      READ_INT_FIELD(distributionKey);
 +      READ_NODE_FIELD(distributionNodes);
 +      READ_NODE_FIELD(distributionRestrict);
 +
 +      READ_DONE();
 +}
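Per the read logic above, each :remoteparams entry is expected to consist of four labelled fields, with the parameter type written either as a raw OID or, in portable mode, as a namespace/name pair. For example (illustrative values only):

    :paramkind 0 :paramid 1 :paramused 1 :paramtype pg_catalog int4

The non-portable form of the same entry would end in ":paramtype 23" instead.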
 +
 +
 +/*
 + * _readSimpleSort
 + */
 +static SimpleSort *
 +_readSimpleSort(void)
 +{
 +      int i;
 +      READ_LOCALS(SimpleSort);
 +
 +      READ_INT_FIELD(numCols);
 +
 +      token = pg_strtok(&length);             /* skip :sortColIdx */
 +      local_node->sortColIdx = (AttrNumber *) palloc(local_node->numCols * sizeof(AttrNumber));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->sortColIdx[i] = atoi(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :sortOperators */
 +      local_node->sortOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *oprname; /* operator name */
 +                      char       *leftnspname; /* left type namespace */
 +                      char       *leftname; /* left type name */
 +                      Oid                     oprleft; /* left type */
 +                      char       *rightnspname; /* right type namespace */
 +                      char       *rightname; /* right type name */
 +                      Oid                     oprright; /* right type */
 +                      /* token is already set to nspname */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get operator name */
 +                      oprname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type namespace */
 +                      leftnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* left type name */
 +                      leftname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type namespace */
 +                      rightnspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* right type name */
 +                      rightname = nullable_string(token, length);
 +                      if (leftname)
 +                              oprleft = get_typname_typid(leftname,
 +                                                                                      NSP_OID(leftnspname));
 +                      else
 +                              oprleft = InvalidOid;
 +                      if (rightname)
 +                              oprright = get_typname_typid(rightname,
 +                                                                                       NSP_OID(rightnspname));
 +                      else
 +                              oprright = InvalidOid;
 +                      local_node->sortOperators[i] = get_operid(oprname,
 +                                                                                                        oprleft,
 +                                                                                                        oprright,
 +                                                                                                        NSP_OID(nspname));
 +              }
 +              else
 +                      local_node->sortOperators[i] = atooid(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :sortCollations */
 +      local_node->sortCollations = (Oid *) palloc(local_node->numCols * sizeof(Oid));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              if (portable_input)
 +              {
 +                      char       *nspname; /* namespace name */
 +                      char       *collname; /* collation name */
 +                      int             collencoding; /* collation encoding */
 +                      /* the token is already read */
 +                      nspname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get collname */
 +                      collname = nullable_string(token, length);
 +                      token = pg_strtok(&length); /* get collencoding */
 +                      collencoding = atoi(token);
 +                      if (collname)
 +                              local_node->sortCollations[i] = get_collid(collname,
 +                                                                                                         collencoding,
 +                                                                                                         NSP_OID(nspname));
 +                      else
 +                              local_node->sortCollations[i] = InvalidOid;
 +              }
 +              else
 +                      local_node->sortCollations[i] = atooid(token);
 +      }
 +
 +      token = pg_strtok(&length);             /* skip :nullsFirst */
 +      local_node->nullsFirst = (bool *) palloc(local_node->numCols * sizeof(bool));
 +      for (i = 0; i < local_node->numCols; i++)
 +      {
 +              token = pg_strtok(&length);
 +              local_node->nullsFirst[i] = strtobool(token);
 +      }
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readNestLoopParam
 + */
 +static NestLoopParam *
 +_readNestLoopParam(void)
 +{
 +      READ_LOCALS(NestLoopParam);
 +
 +      READ_INT_FIELD(paramno);
 +      READ_NODE_FIELD(paramval);
 +
 +      READ_DONE();
 +}
 +
 +
 +/*
 + * _readPlanRowMark
 + */
 +static PlanRowMark *
 +_readPlanRowMark(void)
 +{
 +      READ_LOCALS(PlanRowMark);
 +
 +      READ_UINT_FIELD(rti);
 +      READ_UINT_FIELD(prti);
 +      READ_UINT_FIELD(rowmarkId);
 +      READ_ENUM_FIELD(markType, RowMarkType);
 +      READ_INT_FIELD(allMarkTypes);
 +      READ_ENUM_FIELD(strength, LockClauseStrength);
 +      READ_ENUM_FIELD(waitPolicy, LockWaitPolicy);
 +      READ_BOOL_FIELD(isParent);
 +
 +      READ_DONE();
 +}
 +
 +/*
 + * _readLockRows
 + */
 +static LockRows *
 +_readLockRows(void)
 +{
 +      READ_PLAN_FIELDS(LockRows);
 +
 +      READ_NODE_FIELD(rowMarks);
 +      READ_INT_FIELD(epqParam);
 +
 +      READ_DONE();
 +}
 +
 +#endif /* XCP */
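parseNodeString(), defined next, is the dispatcher that all of the reader functions above hang off; callers normally reach it through the generic entry points rather than directly. A minimal round-trip sketch, assuming the usual nodeToString()/stringToNode() pair (stringToNode() loads the string into pg_strtok() and then reaches parseNodeString() via nodeRead()):

    char   *s = nodeToString(plan);              /* outfuncs.c */
    Plan   *copy = (Plan *) stringToNode(s);     /* ends up in parseNodeString() */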
 +
 +
 +/*
 + * parseNodeString
 + *
 + * Given a character string representing a node tree, parseNodeString creates
 + * the internal node structure.
 + *
 + * The string to be read must already have been loaded into pg_strtok().
 + */
 +Node *
 +parseNodeString(void)
 +{
 +      void       *return_value;
 +
 +      READ_TEMP_LOCALS();
 +
 +      token = pg_strtok(&length);
 +
 +#define MATCH(tokname, namelen) \
 +      (length == namelen && memcmp(token, tokname, namelen) == 0)
 +
 +      if (MATCH("QUERY", 5))
 +              return_value = _readQuery();
 +      else if (MATCH("WITHCHECKOPTION", 15))
 +              return_value = _readWithCheckOption();
 +      else if (MATCH("SORTGROUPCLAUSE", 15))
 +              return_value = _readSortGroupClause();
 +      else if (MATCH("GROUPINGSET", 11))
 +              return_value = _readGroupingSet();
 +      else if (MATCH("WINDOWCLAUSE", 12))
 +              return_value = _readWindowClause();
 +      else if (MATCH("ROWMARKCLAUSE", 13))
 +              return_value = _readRowMarkClause();
 +      else if (MATCH("COMMONTABLEEXPR", 15))
 +              return_value = _readCommonTableExpr();
 +      else if (MATCH("SETOPERATIONSTMT", 16))
 +              return_value = _readSetOperationStmt();
 +      else if (MATCH("ALIAS", 5))
 +              return_value = _readAlias();
 +      else if (MATCH("RANGEVAR", 8))
 +              return_value = _readRangeVar();
 +      else if (MATCH("INTOCLAUSE", 10))
 +              return_value = _readIntoClause();
 +      else if (MATCH("VAR", 3))
 +              return_value = _readVar();
 +      else if (MATCH("CONST", 5))
 +              return_value = _readConst();
 +      else if (MATCH("PARAM", 5))
 +              return_value = _readParam();
 +      else if (MATCH("AGGREF", 6))
 +              return_value = _readAggref();
 +      else if (MATCH("GROUPINGFUNC", 12))
 +              return_value = _readGroupingFunc();
 +      else if (MATCH("WINDOWFUNC", 10))
 +              return_value = _readWindowFunc();
 +      else if (MATCH("ARRAYREF", 8))
 +              return_value = _readArrayRef();
 +      else if (MATCH("FUNCEXPR", 8))
 +              return_value = _readFuncExpr();
 +      else if (MATCH("NAMEDARGEXPR", 12))
 +              return_value = _readNamedArgExpr();
 +      else if (MATCH("OPEXPR", 6))
 +              return_value = _readOpExpr();
 +      else if (MATCH("DISTINCTEXPR", 12))
 +              return_value = _readDistinctExpr();
 +      else if (MATCH("NULLIFEXPR", 10))
 +              return_value = _readNullIfExpr();
 +      else if (MATCH("SCALARARRAYOPEXPR", 17))
 +              return_value = _readScalarArrayOpExpr();
 +      else if (MATCH("BOOLEXPR", 8))
 +              return_value = _readBoolExpr();
 +      else if (MATCH("SUBLINK", 7))
 +              return_value = _readSubLink();
 +#ifdef XCP
 +      else if (MATCH("SUBPLAN", 7))
 +              return_value = _readSubPlan();
 +#endif
 +      else if (MATCH("FIELDSELECT", 11))
 +              return_value = _readFieldSelect();
 +      else if (MATCH("FIELDSTORE", 10))
 +              return_value = _readFieldStore();
 +      else if (MATCH("RELABELTYPE", 11))
 +              return_value = _readRelabelType();
 +      else if (MATCH("COERCEVIAIO", 11))
 +              return_value = _readCoerceViaIO();
 +      else if (MATCH("ARRAYCOERCEEXPR", 15))
 +              return_value = _readArrayCoerceExpr();
 +      else if (MATCH("CONVERTROWTYPEEXPR", 18))
 +              return_value = _readConvertRowtypeExpr();
 +      else if (MATCH("COLLATE", 7))
 +              return_value = _readCollateExpr();
 +      else if (MATCH("CASE", 4))
 +              return_value = _readCaseExpr();
 +      else if (MATCH("WHEN", 4))
 +              return_value = _readCaseWhen();
 +      else if (MATCH("CASETESTEXPR", 12))
 +              return_value = _readCaseTestExpr();
 +      else if (MATCH("ARRAY", 5))
 +              return_value = _readArrayExpr();
 +      else if (MATCH("ROW", 3))
 +              return_value = _readRowExpr();
 +      else if (MATCH("ROWCOMPARE", 10))
 +              return_value = _readRowCompareExpr();
 +      else if (MATCH("COALESCE", 8))
 +              return_value = _readCoalesceExpr();
 +      else if (MATCH("MINMAX", 6))
 +              return_value = _readMinMaxExpr();
 +      else if (MATCH("XMLEXPR", 7))
 +              return_value = _readXmlExpr();
 +      else if (MATCH("NULLTEST", 8))
 +              return_value = _readNullTest();
 +      else if (MATCH("BOOLEANTEST", 11))
 +              return_value = _readBooleanTest();
 +      else if (MATCH("COERCETODOMAIN", 14))
 +              return_value = _readCoerceToDomain();
 +      else if (MATCH("COERCETODOMAINVALUE", 19))
 +              return_value = _readCoerceToDomainValue();
 +      else if (MATCH("SETTODEFAULT", 12))
 +              return_value = _readSetToDefault();
 +      else if (MATCH("CURRENTOFEXPR", 13))
 +              return_value = _readCurrentOfExpr();
 +      else if (MATCH("INFERENCEELEM", 13))
 +              return_value = _readInferenceElem();
 +      else if (MATCH("TARGETENTRY", 11))
 +              return_value = _readTargetEntry();
 +      else if (MATCH("RANGETBLREF", 11))
 +              return_value = _readRangeTblRef();
 +      else if (MATCH("JOINEXPR", 8))
 +              return_value = _readJoinExpr();
 +      else if (MATCH("FROMEXPR", 8))
 +              return_value = _readFromExpr();
 +      else if (MATCH("ONCONFLICTEXPR", 14))
 +              return_value = _readOnConflictExpr();
 +      else if (MATCH("RTE", 3))
 +              return_value = _readRangeTblEntry();
 +      else if (MATCH("RANGETBLFUNCTION", 16))
 +              return_value = _readRangeTblFunction();
 +      else if (MATCH("TABLESAMPLECLAUSE", 17))
 +              return_value = _readTableSampleClause();
 +      else if (MATCH("NOTIFY", 6))
 +              return_value = _readNotifyStmt();
 +      else if (MATCH("DECLARECURSOR", 13))
 +              return_value = _readDeclareCursorStmt();
 +#ifdef XCP
 +      else if (MATCH("PLAN", 4))
 +              return_value = _readPlan();
 +      else if (MATCH("RESULT", 6))
 +              return_value = _readResult();
 +      else if (MATCH("MODIFYTABLE", 11))
 +              return_value = _readModifyTable();
 +      else if (MATCH("APPEND", 6))
 +              return_value = _readAppend();
 +      else if (MATCH("MERGEAPPEND", 11))
 +              return_value = _readMergeAppend();
 +      else if (MATCH("RECURSIVEUNION", 14))
 +              return_value = _readRecursiveUnion();
 +      else if (MATCH("BITMAPAND", 9))
 +              return_value = _readBitmapAnd();
 +      else if (MATCH("BITMAPOR", 8))
 +              return_value = _readBitmapOr();
 +      else if (MATCH("SCAN", 4))
 +              return_value = _readScan();
 +      else if (MATCH("SEQSCAN", 7))
 +              return_value = _readSeqScan();
 +      else if (MATCH("SAMPLESCAN", 10))
 +              return_value = _readSampleScan();
 +      else if (MATCH("INDEXSCAN", 9))
 +              return_value = _readIndexScan();
 +      else if (MATCH("INDEXONLYSCAN", 13))
 +              return_value = _readIndexOnlyScan();
 +      else if (MATCH("BITMAPINDEXSCAN", 15))
 +              return_value = _readBitmapIndexScan();
 +      else if (MATCH("BITMAPHEAPSCAN", 14))
 +              return_value = _readBitmapHeapScan();
 +      else if (MATCH("TIDSCAN", 7))
 +              return_value = _readTidScan();
 +      else if (MATCH("SUBQUERYSCAN", 12))
 +              return_value = _readSubqueryScan();
 +      else if (MATCH("FUNCTIONSCAN", 12))
 +              return_value = _readFunctionScan();
 +      else if (MATCH("VALUESSCAN", 10))
 +              return_value = _readValuesScan();
 +      else if (MATCH("CTESCAN", 7))
 +              return_value = _readCteScan();
 +      else if (MATCH("WORKTABLESCAN", 13))
 +              return_value = _readWorkTableScan();
 +      else if (MATCH("JOIN", 4))
 +              return_value = _readJoin();
 +      else if (MATCH("NESTLOOP", 8))
 +              return_value = _readNestLoop();
 +      else if (MATCH("MERGEJOIN", 9))
 +              return_value = _readMergeJoin();
 +      else if (MATCH("HASHJOIN", 8))
 +              return_value = _readHashJoin();
 +      else if (MATCH("MATERIAL", 8))
 +              return_value = _readMaterial();
 +      else if (MATCH("SORT", 4))
 +              return_value = _readSort();
 +      else if (MATCH("GROUP", 5))
 +              return_value = _readGroup();
 +      else if (MATCH("AGG", 3))
 +              return_value = _readAgg();
 +      else if (MATCH("WINDOWAGG", 9))
 +              return_value = _readWindowAgg();
 +      else if (MATCH("UNIQUE", 6))
 +              return_value = _readUnique();
 +      else if (MATCH("HASH", 4))
 +              return_value = _readHash();
 +      else if (MATCH("SETOP", 5))
 +              return_value = _readSetOp();
 +      else if (MATCH("LIMIT", 5))
 +              return_value = _readLimit();
 +      else if (MATCH("REMOTESUBPLAN", 13))
 +              return_value = _readRemoteSubplan();
 +      else if (MATCH("REMOTESTMT", 10))
 +              return_value = _readRemoteStmt();
 +      else if (MATCH("SIMPLESORT", 10))
 +              return_value = _readSimpleSort();
 +      else if (MATCH("NESTLOOPPARAM", 13))
 +              return_value = _readNestLoopParam();
 +      else if (MATCH("PLANROWMARK", 11))
 +              return_value = _readPlanRowMark();
 +      else if (MATCH("LOCKROWS", 8))
 +              return_value = _readLockRows();
 +#endif
 +      else
 +      {
 +              elog(ERROR, "badly formatted node string \"%.32s\"...", token);
 +              return_value = NULL;    /* keep compiler quiet */
 +      }
++/*
++ * _readDefElem
++ */
++static DefElem *
++_readDefElem(void)
++{
++      READ_LOCALS(DefElem);
 +
-       return (Node *) return_value;
++      READ_STRING_FIELD(defnamespace);
++      READ_STRING_FIELD(defname);
++      READ_NODE_FIELD(arg);
++      READ_ENUM_FIELD(defaction, DefElemAction);
++
++      READ_DONE();
 +}
 +
++/*
++ * _readPlannedStmt
++ */
++static PlannedStmt *
++_readPlannedStmt(void)
++{
++      READ_LOCALS(PlannedStmt);
++
++      READ_ENUM_FIELD(commandType, CmdType);
++      READ_UINT_FIELD(queryId);
++      READ_BOOL_FIELD(hasReturning);
++      READ_BOOL_FIELD(hasModifyingCTE);
++      READ_BOOL_FIELD(canSetTag);
++      READ_BOOL_FIELD(transientPlan);
++      READ_BOOL_FIELD(dependsOnRole);
++      READ_BOOL_FIELD(parallelModeNeeded);
++      READ_NODE_FIELD(planTree);
++      READ_NODE_FIELD(rtable);
++      READ_NODE_FIELD(resultRelations);
++      READ_NODE_FIELD(utilityStmt);
++      READ_NODE_FIELD(subplans);
++      READ_BITMAPSET_FIELD(rewindPlanIDs);
++      READ_NODE_FIELD(rowMarks);
++      READ_NODE_FIELD(relationOids);
++      READ_NODE_FIELD(invalItems);
++      READ_INT_FIELD(nParamExec);
++
++      READ_DONE();
++}
 +
 +/*
-  * readDatum
-  *
-  * Given a string representation of a constant, recreate the appropriate
-  * Datum.  The string representation embeds length info, but not byValue,
-  * so we must be told that.
++ * ReadCommonPlan
++ *    Assign the basic stuff of all nodes that inherit from Plan
 + */
- static Datum
- readDatum(bool typbyval)
++static void
++ReadCommonPlan(Plan *local_node)
 +{
-       Size            length,
-                               i;
-       int                     tokenLength;
-       char       *token;
-       Datum           res;
-       char       *s;
++      READ_TEMP_LOCALS();
 +
-       /*
-        * read the actual length of the value
-        */
-       token = pg_strtok(&tokenLength);
-       length = atoui(token);
++      READ_FLOAT_FIELD(startup_cost);
++      READ_FLOAT_FIELD(total_cost);
++      READ_FLOAT_FIELD(plan_rows);
++      READ_INT_FIELD(plan_width);
++      READ_BOOL_FIELD(parallel_aware);
++      READ_INT_FIELD(plan_node_id);
++      READ_NODE_FIELD(targetlist);
++      READ_NODE_FIELD(qual);
++      READ_NODE_FIELD(lefttree);
++      READ_NODE_FIELD(righttree);
++      READ_NODE_FIELD(initPlan);
++      READ_BITMAPSET_FIELD(extParam);
++      READ_BITMAPSET_FIELD(allParam);
++}
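Under this scheme a reader for any Plan-derived node delegates the inherited fields to ReadCommonPlan() and then reads its own, as the readers below do. For a hypothetical node Foo with a single extra int field, the reader would look like:

    static Foo *
    _readFoo(void)
    {
        READ_LOCALS(Foo);

        ReadCommonPlan(&local_node->plan);

        READ_INT_FIELD(fooField);       /* Foo and fooField are made up for illustration */

        READ_DONE();
    }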
 +
-       token = pg_strtok(&tokenLength);        /* read the '[' */
-       if (token == NULL || token[0] != '[')
-               elog(ERROR, "expected \"[\" to start datum, but got \"%s\"; length = %zu",
-                        token ? (const char *) token : "[NULL]", length);
++/*
++ * _readPlan
++ */
++static Plan *
++_readPlan(void)
++{
++      READ_LOCALS_NO_FIELDS(Plan);
 +
-       if (typbyval)
-       {
-               if (length > (Size) sizeof(Datum))
-                       elog(ERROR, "byval datum but length = %zu", length);
-               res = (Datum) 0;
-               s = (char *) (&res);
-               for (i = 0; i < (Size) sizeof(Datum); i++)
-               {
-                       token = pg_strtok(&tokenLength);
-                       s[i] = (char) atoi(token);
-               }
-       }
-       else if (length <= 0)
-               res = (Datum) NULL;
-       else
-       {
-               s = (char *) palloc(length);
-               for (i = 0; i < length; i++)
-               {
-                       token = pg_strtok(&tokenLength);
-                       s[i] = (char) atoi(token);
-               }
-               res = PointerGetDatum(s);
-       }
++      ReadCommonPlan(local_node);
 +
-       token = pg_strtok(&tokenLength);        /* read the ']' */
-       if (token == NULL || token[0] != ']')
-               elog(ERROR, "expected \"]\" to end datum, but got \"%s\"; length = %zu",
-                        token ? (const char *) token : "[NULL]", length);
++      READ_DONE();
++}
 +
-       return res;
++/*
++ * _readResult
++ */
++static Result *
++_readResult(void)
++{
++      READ_LOCALS(Result);
++
++      ReadCommonPlan(&local_node->plan);
++
++      READ_NODE_FIELD(resconstantqual);
++
++      READ_DONE();
 +}
 +
- #ifdef XCP
 +/*
-  * scanDatum
-  *
-  * Recreate Datum from the text format understandable by the input function
-  * of the specified data type.
++ * _readModifyTable
 + */
- static Datum
- scanDatum(Oid typid, int typmod)
++static ModifyTable *
++_readModifyTable(void)
 +{
-       Oid                     typInput;
-       Oid                     typioparam;
-       FmgrInfo        finfo;
-       FunctionCallInfoData fcinfo;
-       char       *value;
-       Datum           res;
-       READ_TEMP_LOCALS();
++      READ_LOCALS(ModifyTable);
 +
-       /* Get input function for the type */
-       getTypeInputInfo(typid, &typInput, &typioparam);
-       fmgr_info(typInput, &finfo);
++      ReadCommonPlan(&local_node->plan);
 +
-       /* Read the value */
-       token = pg_strtok(&length);
-       value = nullable_string(token, length);
++      READ_ENUM_FIELD(operation, CmdType);
++      READ_BOOL_FIELD(canSetTag);
++      READ_UINT_FIELD(nominalRelation);
++      READ_NODE_FIELD(resultRelations);
++      READ_INT_FIELD(resultRelIndex);
++      READ_NODE_FIELD(plans);
++      READ_NODE_FIELD(withCheckOptionLists);
++      READ_NODE_FIELD(returningLists);
++      READ_NODE_FIELD(fdwPrivLists);
++      READ_BITMAPSET_FIELD(fdwDirectModifyPlans);
++      READ_NODE_FIELD(rowMarks);
++      READ_INT_FIELD(epqParam);
++      READ_ENUM_FIELD(onConflictAction, OnConflictAction);
++      READ_NODE_FIELD(arbiterIndexes);
++      READ_NODE_FIELD(onConflictSet);
++      READ_NODE_FIELD(onConflictWhere);
++      READ_UINT_FIELD(exclRelRTI);
++      READ_NODE_FIELD(exclRelTlist);
 +
-       /* The value can not be NULL, so we actually received empty string */
-       if (value == NULL)
-               value = "";
++      READ_DONE();
++}
 +
-       /* Invoke input function */
-       InitFunctionCallInfoData(fcinfo, &finfo, 3, InvalidOid, NULL, NULL);
++/*
++ * _readAppend
++ */
++static Append *
++_readAppend(void)
++{
++      READ_LOCALS(Append);
 +
-       fcinfo.arg[0] = CStringGetDatum(value);
-       fcinfo.arg[1] = ObjectIdGetDatum(typioparam);
-       fcinfo.arg[2] = Int32GetDatum(typmod);
-       fcinfo.argnull[0] = false;
-       fcinfo.argnull[1] = false;
-       fcinfo.argnull[2] = false;
++      ReadCommonPlan(&local_node->plan);
 +
-       res = FunctionCallInvoke(&fcinfo);
++      READ_NODE_FIELD(appendplans);
 +
-       return res;
++      READ_DONE();
 +}
- #endif
++
++/*
++ * _readMergeAppend
++ */
++static MergeAppend *
++_readMergeAppend(void)
++{
++      READ_LOCALS(MergeAppend);
++
++      ReadCommonPlan(&local_node->plan);
++
++      READ_NODE_FIELD(mergeplans);
++      READ_INT_FIELD(numCols);
++      READ_ATTRNUMBER_ARRAY(sortColIdx, local_node->numCols);
++      READ_OID_ARRAY(sortOperators, local_node->numCols);
++      READ_OID_ARRAY(collations, local_node->numCols);
++      READ_BOOL_ARRAY(nullsFirst, local_node->numCols);
++
++      READ_DONE();
++}
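The merged upstream code reads fixed-length per-column arrays through READ_ATTRNUMBER_ARRAY, READ_OID_ARRAY and READ_BOOL_ARRAY rather than the open-coded loops used by the XL-side functions earlier in this file. Judging from the loops they replace, each such macro is expected to skip the :fldname label and then read len whitespace-separated tokens into a freshly palloc'd array, roughly as follows (a sketch of the intended behaviour, not the actual macro definition):

    /* rough equivalent of READ_OID_ARRAY(sortOperators, local_node->numCols) */
    token = pg_strtok(&length);         /* skip :sortOperators */
    local_node->sortOperators = (Oid *) palloc(local_node->numCols * sizeof(Oid));
    for (i = 0; i < local_node->numCols; i++)
    {
        token = pg_strtok(&length);
        local_node->sortOperators[i] = atooid(token);
    }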
++
++/*
++ * _readRecursiveUnion
++ */
++static RecursiveUnion *
++_readRecursiveUnion(void)
++{
++      READ_LOCALS(RecursiveUnion);
++
++      ReadCommonPlan(&local_node->plan);
++
++      READ_INT_FIELD(wtParam);
++      READ_INT_FIELD(numCols);
++      READ_ATTRNUMBER_ARRAY(dupColIdx, local_node->numCols);
++      READ_OID_ARRAY(dupOperators, local_node->numCols);
++      READ_LONG_FIELD(numGroups);
+       READ_DONE();
+ }
+ /*
+  * _readBitmapAnd
+  */
+ static BitmapAnd *
+ _readBitmapAnd(void)
+ {
+       READ_LOCALS(BitmapAnd);
+       ReadCommonPlan(&local_node->plan);
+       READ_NODE_FIELD(bitmapplans);
+       READ_DONE();
+ }
+ /*
+  * _readBitmapOr
+  */
+ static BitmapOr *
+ _readBitmapOr(void)
+ {
+       READ_LOCALS(BitmapOr);
+       ReadCommonPlan(&local_node->plan);
+       READ_NODE_FIELD(bitmapplans);
+       READ_DONE();
+ }
+ /*
+  * ReadCommonScan
+  *    Assign the basic stuff of all nodes that inherit from Scan
+  */
+ static void
+ ReadCommonScan(Scan *local_node)
+ {
+       READ_TEMP_LOCALS();
+       ReadCommonPlan(&local_node->plan);
+       READ_UINT_FIELD(scanrelid);
+ }
+ /*
+  * _readScan
+  */
+ static Scan *
+ _readScan(void)
+ {
+       READ_LOCALS_NO_FIELDS(Scan);
+       ReadCommonScan(local_node);
+       READ_DONE();
+ }
+ /*
+  * _readSeqScan
+  */
+ static SeqScan *
+ _readSeqScan(void)
+ {
+       READ_LOCALS_NO_FIELDS(SeqScan);
+       ReadCommonScan(local_node);
+       READ_DONE();
+ }
+ /*
+  * _readSampleScan
+  */
+ static SampleScan *
+ _readSampleScan(void)
+ {
+       READ_LOCALS(SampleScan);
+       ReadCommonScan(&local_node->scan);
+       READ_NODE_FIELD(tablesample);
+       READ_DONE();
+ }
+ /*
+  * _readIndexScan
+  */
+ static IndexScan *
+ _readIndexScan(void)
+ {
+       READ_LOCALS(IndexScan);
+       ReadCommonScan(&local_node->scan);
+       READ_OID_FIELD(indexid);
+       READ_NODE_FIELD(indexqual);
+       READ_NODE_FIELD(indexqualorig);
+       READ_NODE_FIELD(indexorderby);
+       READ_NODE_FIELD(indexorderbyorig);
+       READ_NODE_FIELD(indexorderbyops);
+       READ_ENUM_FIELD(indexorderdir, ScanDirection);
+       READ_DONE();
+ }
+ /*
+  * _readIndexOnlyScan
+  */
+ static IndexOnlyScan *
+ _readIndexOnlyScan(void)
+ {
+       READ_LOCALS(IndexOnlyScan);
+       ReadCommonScan(&local_node->scan);
+       READ_OID_FIELD(indexid);
+       READ_NODE_FIELD(indexqual);
+       READ_NODE_FIELD(indexorderby);
+       READ_NODE_FIELD(indextlist);
+       READ_ENUM_FIELD(indexorderdir, ScanDirection);
+       READ_DONE();
+ }
+ /*
+  * _readBitmapIndexScan
+  */
+ static BitmapIndexScan *
+ _readBitmapIndexScan(void)
+ {
+       READ_LOCALS(BitmapIndexScan);
+       ReadCommonScan(&local_node->scan);
+       READ_OID_FIELD(indexid);
+       READ_NODE_FIELD(indexqual);
+       READ_NODE_FIELD(indexqualorig);
+       READ_DONE();
+ }
+ /*
+  * _readBitmapHeapScan
+  */
+ static BitmapHeapScan *
+ _readBitmapHeapScan(void)
+ {
+       READ_LOCALS(BitmapHeapScan);
+       ReadCommonScan(&local_node->scan);
+       READ_NODE_FIELD(bitmapqualorig);
+       READ_DONE();
+ }
+ /*
+  * _readTidScan
+  */
+ static TidScan *
+ _readTidScan(void)
+ {
+       READ_LOCALS(TidScan);
+       ReadCommonScan(&local_node->scan);
+       READ_NODE_FIELD(tidquals);
+       READ_DONE();
+ }
+ /*
+  * _readSubqueryScan
+  */
+ static SubqueryScan *
+ _readSubqueryScan(void)
+ {
+       READ_LOCALS(SubqueryScan);
+       ReadCommonScan(&local_node->scan);
+       READ_NODE_FIELD(subplan);
+       READ_DONE();
+ }
+ /*
+  * _readFunctionScan
+  */
+ static FunctionScan *
+ _readFunctionScan(void)
+ {
+       READ_LOCALS(FunctionScan);
+       ReadCommonScan(&local_node->scan);
+       READ_NODE_FIELD(functions);
+       READ_BOOL_FIELD(funcordinality);
+       READ_DONE();
+ }
+ /*
+  * _readValuesScan
+  */
+ static ValuesScan *
+ _readValuesScan(void)
+ {
+       READ_LOCALS(ValuesScan);
+       ReadCommonScan(&local_node->scan);
+       READ_NODE_FIELD(values_lists);
+       READ_DONE();
+ }
+ /*
+  * _readCteScan
+  */
+ static CteScan *
+ _readCteScan(void)
+ {
+       READ_LOCALS(CteScan);
+       ReadCommonScan(&local_node->scan);
+       READ_INT_FIELD(ctePlanId);
+       READ_INT_FIELD(cteParam);
+       READ_DONE();
+ }
+ /*
+  * _readWorkTableScan
+  */
+ static WorkTableScan *
+ _readWorkTableScan(void)
+ {
+       READ_LOCALS(WorkTableScan);
+       ReadCommonScan(&local_node->scan);
+       READ_INT_FIELD(wtParam);
+       READ_DONE();
+ }
+ /*
+  * _readForeignScan
+  */
+ static ForeignScan *
+ _readForeignScan(void)
+ {
+       READ_LOCALS(ForeignScan);
+       ReadCommonScan(&local_node->scan);
+       READ_ENUM_FIELD(operation, CmdType);
+       READ_OID_FIELD(fs_server);
+       READ_NODE_FIELD(fdw_exprs);
+       READ_NODE_FIELD(fdw_private);
+       READ_NODE_FIELD(fdw_scan_tlist);
+       READ_NODE_FIELD(fdw_recheck_quals);
+       READ_BITMAPSET_FIELD(fs_relids);
+       READ_BOOL_FIELD(fsSystemCol);
+       READ_DONE();
+ }
+ /*
+  * _readCustomScan
+  */
+ static CustomScan *
+ _readCustomScan(void)
+ {
+       READ_LOCALS(CustomScan);
+       char       *custom_name;
+       const CustomScanMethods *methods;
+       ReadCommonScan(&local_node->scan);
+       READ_UINT_FIELD(flags);
+       READ_NODE_FIELD(custom_plans);
+       READ_NODE_FIELD(custom_exprs);
+       READ_NODE_FIELD(custom_private);
+       READ_NODE_FIELD(custom_scan_tlist);
+       READ_BITMAPSET_FIELD(custom_relids);
+       /* Lookup CustomScanMethods by CustomName */
+       token = pg_strtok(&length); /* skip methods: */
+       token = pg_strtok(&length); /* CustomName */
+       custom_name = nullable_string(token, length);
+       methods = GetCustomScanMethods(custom_name, false);
+       local_node->methods = methods;
+       READ_DONE();
+ }
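Hedged aside (illustrative, not part of the diff above): GetCustomScanMethods() can only resolve the CustomName token that _readCustomScan() reads back if a custom scan provider registered those methods earlier. A minimal registration sketch under that assumption; the "demo_scan" name is invented and the executor callback is omitted:

    #include "postgres.h"

    #include "fmgr.h"
    #include "nodes/extensible.h"

    PG_MODULE_MAGIC;

    void		_PG_init(void);

    static CustomScanMethods demo_scan_methods = {
    	"demo_scan",			/* CustomName, matched by _readCustomScan() */
    	NULL					/* CreateCustomScanState; a real provider must set this */
    };

    void
    _PG_init(void)
    {
    	/* makes GetCustomScanMethods("demo_scan", false) succeed */
    	RegisterCustomScanMethods(&demo_scan_methods);
    }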
+ /*
+  * ReadCommonJoin
+  *    Assign the basic stuff of all nodes that inherit from Join
+  */
+ static void
+ ReadCommonJoin(Join *local_node)
+ {
+       READ_TEMP_LOCALS();
+       ReadCommonPlan(&local_node->plan);
+       READ_ENUM_FIELD(jointype, JoinType);
+       READ_NODE_FIELD(joinqual);
+ }
+ /*
+  * _readJoin
+  */
+ static Join *
+ _readJoin(void)
+ {
+       READ_LOCALS_NO_FIELDS(Join);
+       ReadCommonJoin(local_node);
+       READ_DONE();
+ }
+ /*
+  * _readNestLoop
+  */
+ static NestLoop *
+ _readNestLoop(void)
+ {
+       READ_LOCALS(NestLoop);
+       ReadCommonJoin(&local_node->join);
+       READ_NODE_FIELD(nestParams);
+       READ_DONE();
+ }
+ /*
+  * _readMergeJoin
+  */
+ static MergeJoin *
+ _readMergeJoin(void)
+ {
+       int                     numCols;
+       READ_LOCALS(MergeJoin);
+       ReadCommonJoin(&local_node->join);
+       READ_NODE_FIELD(mergeclauses);
+       numCols = list_length(local_node->mergeclauses);
+       READ_OID_ARRAY(mergeFamilies, numCols);
+       READ_OID_ARRAY(mergeCollations, numCols);
+       READ_INT_ARRAY(mergeStrategies, numCols);
+       READ_BOOL_ARRAY(mergeNullsFirst, numCols);
+       READ_DONE();
+ }
+ /*
+  * _readHashJoin
+  */
+ static HashJoin *
+ _readHashJoin(void)
+ {
+       READ_LOCALS(HashJoin);
+       ReadCommonJoin(&local_node->join);
+       READ_NODE_FIELD(hashclauses);
+       READ_DONE();
+ }
+ /*
+  * _readMaterial
+  */
+ static Material *
+ _readMaterial(void)
+ {
+       READ_LOCALS_NO_FIELDS(Material);
+       ReadCommonPlan(&local_node->plan);
+       READ_DONE();
+ }
+ /*
+  * _readSort
+  */
+ static Sort *
+ _readSort(void)
+ {
+       READ_LOCALS(Sort);
+       ReadCommonPlan(&local_node->plan);
+       READ_INT_FIELD(numCols);
+       READ_ATTRNUMBER_ARRAY(sortColIdx, local_node->numCols);
+       READ_OID_ARRAY(sortOperators, local_node->numCols);
+       READ_OID_ARRAY(collations, local_node->numCols);
+       READ_BOOL_ARRAY(nullsFirst, local_node->numCols);
+       READ_DONE();
+ }
+ /*
+  * _readGroup
+  */
+ static Group *
+ _readGroup(void)
+ {
+       READ_LOCALS(Group);
+       ReadCommonPlan(&local_node->plan);
+       READ_INT_FIELD(numCols);
+       READ_ATTRNUMBER_ARRAY(grpColIdx, local_node->numCols);
+       READ_OID_ARRAY(grpOperators, local_node->numCols);
+       READ_DONE();
+ }
+ /*
+  * _readAgg
+  */
+ static Agg *
+ _readAgg(void)
+ {
+       READ_LOCALS(Agg);
+       ReadCommonPlan(&local_node->plan);
+       READ_ENUM_FIELD(aggstrategy, AggStrategy);
+       READ_ENUM_FIELD(aggsplit, AggSplit);
+       READ_INT_FIELD(numCols);
+       READ_ATTRNUMBER_ARRAY(grpColIdx, local_node->numCols);
+       READ_OID_ARRAY(grpOperators, local_node->numCols);
+       READ_LONG_FIELD(numGroups);
+       READ_NODE_FIELD(groupingSets);
+       READ_NODE_FIELD(chain);
+       READ_DONE();
+ }
+ /*
+  * _readWindowAgg
+  */
+ static WindowAgg *
+ _readWindowAgg(void)
+ {
+       READ_LOCALS(WindowAgg);
+       ReadCommonPlan(&local_node->plan);
+       READ_UINT_FIELD(winref);
+       READ_INT_FIELD(partNumCols);
+       READ_ATTRNUMBER_ARRAY(partColIdx, local_node->partNumCols);
+       READ_OID_ARRAY(partOperators, local_node->partNumCols);
+       READ_INT_FIELD(ordNumCols);
+       READ_ATTRNUMBER_ARRAY(ordColIdx, local_node->ordNumCols);
+       READ_OID_ARRAY(ordOperators, local_node->ordNumCols);
+       READ_INT_FIELD(frameOptions);
+       READ_NODE_FIELD(startOffset);
+       READ_NODE_FIELD(endOffset);
+       READ_DONE();
+ }
+ /*
+  * _readUnique
+  */
+ static Unique *
+ _readUnique(void)
+ {
+       READ_LOCALS(Unique);
+       ReadCommonPlan(&local_node->plan);
+       READ_INT_FIELD(numCols);
+       READ_ATTRNUMBER_ARRAY(uniqColIdx, local_node->numCols);
+       READ_OID_ARRAY(uniqOperators, local_node->numCols);
+       READ_DONE();
+ }
+ /*
+  * _readGather
+  */
+ static Gather *
+ _readGather(void)
+ {
+       READ_LOCALS(Gather);
+       ReadCommonPlan(&local_node->plan);
+       READ_INT_FIELD(num_workers);
+       READ_BOOL_FIELD(single_copy);
+       READ_BOOL_FIELD(invisible);
+       READ_DONE();
+ }
+ /*
+  * _readHash
+  */
+ static Hash *
+ _readHash(void)
+ {
+       READ_LOCALS(Hash);
+       ReadCommonPlan(&local_node->plan);
+       READ_OID_FIELD(skewTable);
+       READ_INT_FIELD(skewColumn);
+       READ_BOOL_FIELD(skewInherit);
+       READ_OID_FIELD(skewColType);
+       READ_INT_FIELD(skewColTypmod);
+       READ_DONE();
+ }
+ /*
+  * _readSetOp
+  */
+ static SetOp *
+ _readSetOp(void)
+ {
+       READ_LOCALS(SetOp);
+       ReadCommonPlan(&local_node->plan);
+       READ_ENUM_FIELD(cmd, SetOpCmd);
+       READ_ENUM_FIELD(strategy, SetOpStrategy);
+       READ_INT_FIELD(numCols);
+       READ_ATTRNUMBER_ARRAY(dupColIdx, local_node->numCols);
+       READ_OID_ARRAY(dupOperators, local_node->numCols);
+       READ_INT_FIELD(flagColIdx);
+       READ_INT_FIELD(firstFlag);
+       READ_LONG_FIELD(numGroups);
+       READ_DONE();
+ }
+ /*
+  * _readLockRows
+  */
+ static LockRows *
+ _readLockRows(void)
+ {
+       READ_LOCALS(LockRows);
+       ReadCommonPlan(&local_node->plan);
+       READ_NODE_FIELD(rowMarks);
+       READ_INT_FIELD(epqParam);
+       READ_DONE();
+ }
+ /*
+  * _readLimit
+  */
+ static Limit *
+ _readLimit(void)
+ {
+       READ_LOCALS(Limit);
+       ReadCommonPlan(&local_node->plan);
+       READ_NODE_FIELD(limitOffset);
+       READ_NODE_FIELD(limitCount);
+       READ_DONE();
+ }
+ /*
+  * _readNestLoopParam
+  */
+ static NestLoopParam *
+ _readNestLoopParam(void)
+ {
+       READ_LOCALS(NestLoopParam);
+       READ_INT_FIELD(paramno);
+       READ_NODE_FIELD(paramval);
+       READ_DONE();
+ }
+ /*
+  * _readPlanRowMark
+  */
+ static PlanRowMark *
+ _readPlanRowMark(void)
+ {
+       READ_LOCALS(PlanRowMark);
+       READ_UINT_FIELD(rti);
+       READ_UINT_FIELD(prti);
+       READ_UINT_FIELD(rowmarkId);
+       READ_ENUM_FIELD(markType, RowMarkType);
+       READ_INT_FIELD(allMarkTypes);
+       READ_ENUM_FIELD(strength, LockClauseStrength);
+       READ_ENUM_FIELD(waitPolicy, LockWaitPolicy);
+       READ_BOOL_FIELD(isParent);
+       READ_DONE();
+ }
+ /*
+  * _readPlanInvalItem
+  */
+ static PlanInvalItem *
+ _readPlanInvalItem(void)
+ {
+       READ_LOCALS(PlanInvalItem);
+       READ_INT_FIELD(cacheId);
+       READ_UINT_FIELD(hashValue);
+       READ_DONE();
+ }
+ /*
+  * _readSubPlan
+  */
+ static SubPlan *
+ _readSubPlan(void)
+ {
+       READ_LOCALS(SubPlan);
+       READ_ENUM_FIELD(subLinkType, SubLinkType);
+       READ_NODE_FIELD(testexpr);
+       READ_NODE_FIELD(paramIds);
+       READ_INT_FIELD(plan_id);
+       READ_STRING_FIELD(plan_name);
+       READ_OID_FIELD(firstColType);
+       READ_INT_FIELD(firstColTypmod);
+       READ_OID_FIELD(firstColCollation);
+       READ_BOOL_FIELD(useHashTable);
+       READ_BOOL_FIELD(unknownEqFalse);
+       READ_NODE_FIELD(setParam);
+       READ_NODE_FIELD(parParam);
+       READ_NODE_FIELD(args);
+       READ_FLOAT_FIELD(startup_cost);
+       READ_FLOAT_FIELD(per_call_cost);
+       READ_DONE();
+ }
+ /*
+  * _readAlternativeSubPlan
+  */
+ static AlternativeSubPlan *
+ _readAlternativeSubPlan(void)
+ {
+       READ_LOCALS(AlternativeSubPlan);
+       READ_NODE_FIELD(subplans);
+       READ_DONE();
+ }
+ /*
+  * _readExtensibleNode
+  */
+ static ExtensibleNode *
+ _readExtensibleNode(void)
+ {
+       const ExtensibleNodeMethods *methods;
+       ExtensibleNode *local_node;
+       const char *extnodename;
+       READ_TEMP_LOCALS();
+       token = pg_strtok(&length); /* skip :extnodename */
+       token = pg_strtok(&length); /* get extnodename */
+       extnodename = nullable_string(token, length);
+       if (!extnodename)
+               elog(ERROR, "extnodename has to be supplied");
+       methods = GetExtensibleNodeMethods(extnodename, false);
+       local_node = (ExtensibleNode *) newNode(methods->node_size,
+                                                                                       T_ExtensibleNode);
+       local_node->extnodename = extnodename;
+       /* deserialize the private fields */
+       methods->nodeRead(local_node);
+       READ_DONE();
+ }
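Hedged aside (illustrative, not part of the diff above): the nodeRead() callback invoked by _readExtensibleNode() comes from whichever extension registered the node type. A sketch of such a type with one private field, assuming the invented names below; the copy/equal/out callbacks, which a real extension must also supply, are omitted:

    #include "postgres.h"

    #include "nodes/extensible.h"
    #include "nodes/readfuncs.h"

    typedef struct DemoNode
    {
    	ExtensibleNode extnode;
    	int			demo_value;		/* the only private field */
    } DemoNode;

    static void
    demo_node_read(ExtensibleNode *node)
    {
    	DemoNode   *local_node = (DemoNode *) node;
    	char	   *token;
    	int			length;

    	token = pg_strtok(&length); /* skip :demo_value (written by nodeOut) */
    	token = pg_strtok(&length); /* the value itself */
    	local_node->demo_value = atoi(token);
    }

    static const ExtensibleNodeMethods demo_node_methods = {
    	"demo_node",				/* extnodename, looked up above */
    	sizeof(DemoNode),			/* node_size passed to newNode() above */
    	NULL,						/* nodeCopy: required in practice, omitted here */
    	NULL,						/* nodeEqual: likewise */
    	NULL,						/* nodeOut: likewise */
    	demo_node_read
    };

RegisterExtensibleNodeMethods(&demo_node_methods) would normally be called from the module's _PG_init().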
+ /*
+  * parseNodeString
+  *
+  * Given a character string representing a node tree, parseNodeString creates
+  * the internal node structure.
+  *
+  * The string to be read must already have been loaded into pg_strtok().
+  */
+ Node *
+ parseNodeString(void)
+ {
+       void       *return_value;
+       READ_TEMP_LOCALS();
+       token = pg_strtok(&length);
+ #define MATCH(tokname, namelen) \
+       (length == namelen && memcmp(token, tokname, namelen) == 0)
+       if (MATCH("QUERY", 5))
+               return_value = _readQuery();
+       else if (MATCH("WITHCHECKOPTION", 15))
+               return_value = _readWithCheckOption();
+       else if (MATCH("SORTGROUPCLAUSE", 15))
+               return_value = _readSortGroupClause();
+       else if (MATCH("GROUPINGSET", 11))
+               return_value = _readGroupingSet();
+       else if (MATCH("WINDOWCLAUSE", 12))
+               return_value = _readWindowClause();
+       else if (MATCH("ROWMARKCLAUSE", 13))
+               return_value = _readRowMarkClause();
+       else if (MATCH("COMMONTABLEEXPR", 15))
+               return_value = _readCommonTableExpr();
+       else if (MATCH("SETOPERATIONSTMT", 16))
+               return_value = _readSetOperationStmt();
+       else if (MATCH("ALIAS", 5))
+               return_value = _readAlias();
+       else if (MATCH("RANGEVAR", 8))
+               return_value = _readRangeVar();
+       else if (MATCH("INTOCLAUSE", 10))
+               return_value = _readIntoClause();
+       else if (MATCH("VAR", 3))
+               return_value = _readVar();
+       else if (MATCH("CONST", 5))
+               return_value = _readConst();
+       else if (MATCH("PARAM", 5))
+               return_value = _readParam();
+       else if (MATCH("AGGREF", 6))
+               return_value = _readAggref();
+       else if (MATCH("GROUPINGFUNC", 12))
+               return_value = _readGroupingFunc();
+       else if (MATCH("WINDOWFUNC", 10))
+               return_value = _readWindowFunc();
+       else if (MATCH("ARRAYREF", 8))
+               return_value = _readArrayRef();
+       else if (MATCH("FUNCEXPR", 8))
+               return_value = _readFuncExpr();
+       else if (MATCH("NAMEDARGEXPR", 12))
+               return_value = _readNamedArgExpr();
+       else if (MATCH("OPEXPR", 6))
+               return_value = _readOpExpr();
+       else if (MATCH("DISTINCTEXPR", 12))
+               return_value = _readDistinctExpr();
+       else if (MATCH("NULLIFEXPR", 10))
+               return_value = _readNullIfExpr();
+       else if (MATCH("SCALARARRAYOPEXPR", 17))
+               return_value = _readScalarArrayOpExpr();
+       else if (MATCH("BOOLEXPR", 8))
+               return_value = _readBoolExpr();
+       else if (MATCH("SUBLINK", 7))
+               return_value = _readSubLink();
+       else if (MATCH("FIELDSELECT", 11))
+               return_value = _readFieldSelect();
+       else if (MATCH("FIELDSTORE", 10))
+               return_value = _readFieldStore();
+       else if (MATCH("RELABELTYPE", 11))
+               return_value = _readRelabelType();
+       else if (MATCH("COERCEVIAIO", 11))
+               return_value = _readCoerceViaIO();
+       else if (MATCH("ARRAYCOERCEEXPR", 15))
+               return_value = _readArrayCoerceExpr();
+       else if (MATCH("CONVERTROWTYPEEXPR", 18))
+               return_value = _readConvertRowtypeExpr();
+       else if (MATCH("COLLATE", 7))
+               return_value = _readCollateExpr();
+       else if (MATCH("CASE", 4))
+               return_value = _readCaseExpr();
+       else if (MATCH("WHEN", 4))
+               return_value = _readCaseWhen();
+       else if (MATCH("CASETESTEXPR", 12))
+               return_value = _readCaseTestExpr();
+       else if (MATCH("ARRAY", 5))
+               return_value = _readArrayExpr();
+       else if (MATCH("ROW", 3))
+               return_value = _readRowExpr();
+       else if (MATCH("ROWCOMPARE", 10))
+               return_value = _readRowCompareExpr();
+       else if (MATCH("COALESCE", 8))
+               return_value = _readCoalesceExpr();
+       else if (MATCH("MINMAX", 6))
+               return_value = _readMinMaxExpr();
+       else if (MATCH("XMLEXPR", 7))
+               return_value = _readXmlExpr();
+       else if (MATCH("NULLTEST", 8))
+               return_value = _readNullTest();
+       else if (MATCH("BOOLEANTEST", 11))
+               return_value = _readBooleanTest();
+       else if (MATCH("COERCETODOMAIN", 14))
+               return_value = _readCoerceToDomain();
+       else if (MATCH("COERCETODOMAINVALUE", 19))
+               return_value = _readCoerceToDomainValue();
+       else if (MATCH("SETTODEFAULT", 12))
+               return_value = _readSetToDefault();
+       else if (MATCH("CURRENTOFEXPR", 13))
+               return_value = _readCurrentOfExpr();
+       else if (MATCH("INFERENCEELEM", 13))
+               return_value = _readInferenceElem();
+       else if (MATCH("TARGETENTRY", 11))
+               return_value = _readTargetEntry();
+       else if (MATCH("RANGETBLREF", 11))
+               return_value = _readRangeTblRef();
+       else if (MATCH("JOINEXPR", 8))
+               return_value = _readJoinExpr();
+       else if (MATCH("FROMEXPR", 8))
+               return_value = _readFromExpr();
+       else if (MATCH("ONCONFLICTEXPR", 14))
+               return_value = _readOnConflictExpr();
+       else if (MATCH("RTE", 3))
+               return_value = _readRangeTblEntry();
+       else if (MATCH("RANGETBLFUNCTION", 16))
+               return_value = _readRangeTblFunction();
+       else if (MATCH("TABLESAMPLECLAUSE", 17))
+               return_value = _readTableSampleClause();
+       else if (MATCH("NOTIFY", 6))
+               return_value = _readNotifyStmt();
+       else if (MATCH("DEFELEM", 7))
+               return_value = _readDefElem();
+       else if (MATCH("DECLARECURSOR", 13))
+               return_value = _readDeclareCursorStmt();
+       else if (MATCH("PLANNEDSTMT", 11))
+               return_value = _readPlannedStmt();
+       else if (MATCH("PLAN", 4))
+               return_value = _readPlan();
+       else if (MATCH("RESULT", 6))
+               return_value = _readResult();
+       else if (MATCH("MODIFYTABLE", 11))
+               return_value = _readModifyTable();
+       else if (MATCH("APPEND", 6))
+               return_value = _readAppend();
+       else if (MATCH("MERGEAPPEND", 11))
+               return_value = _readMergeAppend();
+       else if (MATCH("RECURSIVEUNION", 14))
+               return_value = _readRecursiveUnion();
+       else if (MATCH("BITMAPAND", 9))
+               return_value = _readBitmapAnd();
+       else if (MATCH("BITMAPOR", 8))
+               return_value = _readBitmapOr();
+       else if (MATCH("SCAN", 4))
+               return_value = _readScan();
+       else if (MATCH("SEQSCAN", 7))
+               return_value = _readSeqScan();
+       else if (MATCH("SAMPLESCAN", 10))
+               return_value = _readSampleScan();
+       else if (MATCH("INDEXSCAN", 9))
+               return_value = _readIndexScan();
+       else if (MATCH("INDEXONLYSCAN", 13))
+               return_value = _readIndexOnlyScan();
+       else if (MATCH("BITMAPINDEXSCAN", 15))
+               return_value = _readBitmapIndexScan();
+       else if (MATCH("BITMAPHEAPSCAN", 14))
+               return_value = _readBitmapHeapScan();
+       else if (MATCH("TIDSCAN", 7))
+               return_value = _readTidScan();
+       else if (MATCH("SUBQUERYSCAN", 12))
+               return_value = _readSubqueryScan();
+       else if (MATCH("FUNCTIONSCAN", 12))
+               return_value = _readFunctionScan();
+       else if (MATCH("VALUESSCAN", 10))
+               return_value = _readValuesScan();
+       else if (MATCH("CTESCAN", 7))
+               return_value = _readCteScan();
+       else if (MATCH("WORKTABLESCAN", 13))
+               return_value = _readWorkTableScan();
+       else if (MATCH("FOREIGNSCAN", 11))
+               return_value = _readForeignScan();
+       else if (MATCH("CUSTOMSCAN", 10))
+               return_value = _readCustomScan();
+       else if (MATCH("JOIN", 4))
+               return_value = _readJoin();
+       else if (MATCH("NESTLOOP", 8))
+               return_value = _readNestLoop();
+       else if (MATCH("MERGEJOIN", 9))
+               return_value = _readMergeJoin();
+       else if (MATCH("HASHJOIN", 8))
+               return_value = _readHashJoin();
+       else if (MATCH("MATERIAL", 8))
+               return_value = _readMaterial();
+       else if (MATCH("SORT", 4))
+               return_value = _readSort();
+       else if (MATCH("GROUP", 5))
+               return_value = _readGroup();
+       else if (MATCH("AGG", 3))
+               return_value = _readAgg();
+       else if (MATCH("WINDOWAGG", 9))
+               return_value = _readWindowAgg();
+       else if (MATCH("UNIQUE", 6))
+               return_value = _readUnique();
+       else if (MATCH("GATHER", 6))
+               return_value = _readGather();
+       else if (MATCH("HASH", 4))
+               return_value = _readHash();
+       else if (MATCH("SETOP", 5))
+               return_value = _readSetOp();
+       else if (MATCH("LOCKROWS", 8))
+               return_value = _readLockRows();
+       else if (MATCH("LIMIT", 5))
+               return_value = _readLimit();
+       else if (MATCH("NESTLOOPPARAM", 13))
+               return_value = _readNestLoopParam();
+       else if (MATCH("PLANROWMARK", 11))
+               return_value = _readPlanRowMark();
+       else if (MATCH("PLANINVALITEM", 13))
+               return_value = _readPlanInvalItem();
+       else if (MATCH("SUBPLAN", 7))
+               return_value = _readSubPlan();
+       else if (MATCH("ALTERNATIVESUBPLAN", 18))
+               return_value = _readAlternativeSubPlan();
+       else if (MATCH("EXTENSIBLENODE", 14))
+               return_value = _readExtensibleNode();
+       else
+       {
+               elog(ERROR, "badly formatted node string \"%.32s\"...", token);
+               return_value = NULL;    /* keep compiler quiet */
+       }
+       return (Node *) return_value;
+ }
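Hedged aside (illustrative, not part of the diff above): parseNodeString() is the dispatcher behind stringToNode(), the inverse of outfuncs.c's nodeToString(). A minimal round-trip sketch using only those public wrappers; the Var field values are arbitrary:

    #include "postgres.h"

    #include "catalog/pg_type.h"
    #include "nodes/makefuncs.h"
    #include "nodes/nodes.h"

    static void
    node_roundtrip_example(void)
    {
    	/* a throwaway Var: attribute 1 of range table entry 1, type int4 */
    	Var		   *var = makeVar(1, 1, INT4OID, -1, InvalidOid, 0);
    	char	   *str = nodeToString(var);	/* text form read back below */
    	Var		   *copy = (Var *) stringToNode(str);	/* ends up in _readVar() */

    	Assert(equal(var, copy));
    	pfree(str);
    }

stringToNode() loads the string into pg_strtok() and then calls parseNodeString() for every "{NODETAG ...}" group it encounters.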
+ /*
+  * readDatum
+  *
+  * Given a string representation of a constant, recreate the appropriate
+  * Datum.  The string representation embeds length info, but not byValue,
+  * so we must be told that.
+  */
+ Datum
+ readDatum(bool typbyval)
+ {
+       Size            length,
+                               i;
+       int                     tokenLength;
+       char       *token;
+       Datum           res;
+       char       *s;
+       /*
+        * read the actual length of the value
+        */
+       token = pg_strtok(&tokenLength);
+       length = atoui(token);
+       token = pg_strtok(&tokenLength);        /* read the '[' */
+       if (token == NULL || token[0] != '[')
+               elog(ERROR, "expected \"[\" to start datum, but got \"%s\"; length = %zu",
+                        token ? (const char *) token : "[NULL]", length);
+       if (typbyval)
+       {
+               if (length > (Size) sizeof(Datum))
+                       elog(ERROR, "byval datum but length = %zu", length);
+               res = (Datum) 0;
+               s = (char *) (&res);
+               for (i = 0; i < (Size) sizeof(Datum); i++)
+               {
+                       token = pg_strtok(&tokenLength);
+                       s[i] = (char) atoi(token);
+               }
+       }
+       else if (length <= 0)
+               res = (Datum) NULL;
+       else
+       {
+               s = (char *) palloc(length);
+               for (i = 0; i < length; i++)
+               {
+                       token = pg_strtok(&tokenLength);
+                       s[i] = (char) atoi(token);
+               }
+               res = PointerGetDatum(s);
+       }
+       token = pg_strtok(&tokenLength);        /* read the ']' */
+       if (token == NULL || token[0] != ']')
+               elog(ERROR, "expected \"]\" to end datum, but got \"%s\"; length = %zu",
+                        token ? (const char *) token : "[NULL]", length);
+       return res;
+ }
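Hedged illustration (not part of the diff above) of the token stream readDatum() consumes, which is produced by outDatum() in outfuncs.c. The value 42 and the exact byte layout are examples only; the layout depends on Datum width and endianness:

    #include "postgres.h"

    #include "catalog/pg_type.h"
    #include "nodes/makefuncs.h"

    static void
    show_datum_text_format(void)
    {
    	/* int4 constant 42: pass-by-value, length 4 */
    	Const	   *c = makeConst(INT4OID, -1, InvalidOid, sizeof(int32),
    							  Int32GetDatum(42), false, true);

    	/*
    	 * On a little-endian build with 8-byte Datum, the :constvalue part of
    	 * the output reads roughly "4 [ 42 0 0 0 0 0 0 0 ]": the value's
    	 * length, then one decimal byte per Datum byte, bracketed -- exactly
    	 * what readDatum(true) reassembles above.
    	 */
    	elog(LOG, "%s", nodeToString(c));
    }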
++<<<<<<< HEAD
++#ifdef XCP
++/*
++ * scanDatum
++ *
++ * Recreate Datum from the text format understandable by the input function
++ * of the specified data type.
++ */
++static Datum
++scanDatum(Oid typid, int typmod)
++{
++      Oid                     typInput;
++      Oid                     typioparam;
++      FmgrInfo        finfo;
++      FunctionCallInfoData fcinfo;
++      char       *value;
++      Datum           res;
++      READ_TEMP_LOCALS();
++
++      /* Get input function for the type */
++      getTypeInputInfo(typid, &typInput, &typioparam);
++      fmgr_info(typInput, &finfo);
++
++      /* Read the value */
++      token = pg_strtok(&length);
++      value = nullable_string(token, length);
++
++      /* The value cannot be NULL, so we must have received an empty string */
++      if (value == NULL)
++              value = "";
++
++      /* Invoke input function */
++      InitFunctionCallInfoData(fcinfo, &finfo, 3, InvalidOid, NULL, NULL);
++
++      fcinfo.arg[0] = CStringGetDatum(value);
++      fcinfo.arg[1] = ObjectIdGetDatum(typioparam);
++      fcinfo.arg[2] = Int32GetDatum(typmod);
++      fcinfo.argnull[0] = false;
++      fcinfo.argnull[1] = false;
++      fcinfo.argnull[2] = false;
++
++      res = FunctionCallInvoke(&fcinfo);
++
++      return res;
++}
++#endif
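Hedged note (illustrative, not part of the diff above): the hand-rolled FunctionCallInfoData setup in scanDatum() matches what the stock fmgr helper OidInputFunctionCall() does. An equivalent sketch, with made-up parameter names:

    #include "postgres.h"

    #include "fmgr.h"
    #include "utils/lsyscache.h"

    static Datum
    scan_datum_via_fmgr(Oid typid, int32 typmod, char *value)
    {
    	Oid			typInput;
    	Oid			typioparam;

    	getTypeInputInfo(typid, &typInput, &typioparam);

    	/* calls the type's input function with (value, typioparam, typmod) */
    	return OidInputFunctionCall(typInput, value, typioparam, typmod);
    }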
++=======
+ /*
+  * readAttrNumberCols
+  */
+ AttrNumber *
+ readAttrNumberCols(int numCols)
+ {
+       int                     tokenLength,
+                               i;
+       char       *token;
+       AttrNumber *attr_vals;
+       if (numCols <= 0)
+               return NULL;
+       attr_vals = (AttrNumber *) palloc(numCols * sizeof(AttrNumber));
+       for (i = 0; i < numCols; i++)
+       {
+               token = pg_strtok(&tokenLength);
+               attr_vals[i] = atoi(token);
+       }
+       return attr_vals;
+ }
+ /*
+  * readOidCols
+  */
+ Oid *
+ readOidCols(int numCols)
+ {
+       int                     tokenLength,
+                               i;
+       char       *token;
+       Oid                *oid_vals;
+       if (numCols <= 0)
+               return NULL;
+       oid_vals = (Oid *) palloc(numCols * sizeof(Oid));
+       for (i = 0; i < numCols; i++)
+       {
+               token = pg_strtok(&tokenLength);
+               oid_vals[i] = atooid(token);
+       }
+       return oid_vals;
+ }
+ /*
+  * readIntCols
+  */
+ int *
+ readIntCols(int numCols)
+ {
+       int                     tokenLength,
+                               i;
+       char       *token;
+       int                *int_vals;
+       if (numCols <= 0)
+               return NULL;
+       int_vals = (int *) palloc(numCols * sizeof(int));
+       for (i = 0; i < numCols; i++)
+       {
+               token = pg_strtok(&tokenLength);
+               int_vals[i] = atoi(token);
+       }
+       return int_vals;
+ }
+ /*
+  * readBoolCols
+  */
+ bool *
+ readBoolCols(int numCols)
+ {
+       int                     tokenLength,
+                               i;
+       char       *token;
+       bool       *bool_vals;
+       if (numCols <= 0)
+               return NULL;
+       bool_vals = (bool *) palloc(numCols * sizeof(bool));
+       for (i = 0; i < numCols; i++)
+       {
+               token = pg_strtok(&tokenLength);
+               bool_vals[i] = strtobool(token);
+       }
+       return bool_vals;
+ }
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
index 2ca91839d9d8f233df797ec41d26f3c2545858e2,88d833a2e89682b12eee1c4b1c50a4e7f47add1a..29c27c59eee6850bad849766339cd9b89916b264
@@@ -3,8 -3,7 +3,8 @@@
   * allpaths.c
   *      Routines to find possible search paths for processing a query
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  
  #include "postgres.h"
  
+ #include <limits.h>
  #include <math.h>
  
 +#include "catalog/pg_namespace.h"
  #include "access/sysattr.h"
  #include "access/tsmapi.h"
  #include "catalog/pg_class.h"
@@@ -464,7 -493,176 +499,177 @@@ set_plain_rel_size(PlannerInfo *root, R
        set_baserel_size_estimates(root, rel);
  }
  
 +
+ /*
+  * If this relation could possibly be scanned from within a worker, then set
+  * its consider_parallel flag.
+  */
+ static void
+ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
+                                                 RangeTblEntry *rte)
+ {
+       /*
+        * The flag has previously been initialized to false, so we can just
+        * return if it becomes clear that we can't safely set it.
+        */
+       Assert(!rel->consider_parallel);
+       /* Don't call this if parallelism is disallowed for the entire query. */
+       Assert(root->glob->parallelModeOK);
+       /* This should only be called for baserels and appendrel children. */
+       Assert(rel->reloptkind == RELOPT_BASEREL ||
+                  rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
+       /* Assorted checks based on rtekind. */
+       switch (rte->rtekind)
+       {
+               case RTE_RELATION:
+                       /*
+                        * Currently, parallel workers can't access the leader's temporary
+                        * tables.  We could possibly relax this if we wrote all of its
+                        * local buffers at the start of the query and made no changes
+                        * thereafter (maybe we could allow hint bit changes), and if we
+                        * taught the workers to read them.  Writing a large number of
+                        * temporary buffers could be expensive, though, and we don't have
+                        * the rest of the necessary infrastructure right now anyway.  So
+                        * for now, bail out if we see a temporary table.
+                        */
+                       if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
+                               return;
+                       /*
+                        * Table sampling can be pushed down to workers if the sample
+                        * function and its arguments are safe.
+                        */
+                       if (rte->tablesample != NULL)
+                       {
+                               Oid                     proparallel = func_parallel(rte->tablesample->tsmhandler);
+                               if (proparallel != PROPARALLEL_SAFE)
+                                       return;
+                               if (has_parallel_hazard((Node *) rte->tablesample->args,
+                                                                               false))
+                                       return;
+                       }
+                       /*
+                        * Ask FDWs whether they can support performing a ForeignScan
+                        * within a worker.  Most often, the answer will be no.  For
+                        * example, if the nature of the FDW is such that it opens a TCP
+                        * connection with a remote server, each parallel worker would end
+                        * up with a separate connection, and these connections might not
+                        * be appropriately coordinated between workers and the leader.
+                        */
+                       if (rte->relkind == RELKIND_FOREIGN_TABLE)
+                       {
+                               Assert(rel->fdwroutine);
+                               if (!rel->fdwroutine->IsForeignScanParallelSafe)
+                                       return;
+                               if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
+                                       return;
+                       }
+                       /*
+                        * There are additional considerations for appendrels, which we'll
+                        * deal with in set_append_rel_size and set_append_rel_pathlist.
+                        * For now, just set consider_parallel based on the rel's own
+                        * quals and targetlist.
+                        */
+                       break;
+               case RTE_SUBQUERY:
+                       /*
+                        * There's no intrinsic problem with scanning a subquery-in-FROM
+                        * (as distinct from a SubPlan or InitPlan) in a parallel worker.
+                        * If the subquery doesn't happen to have any parallel-safe paths,
+                        * then flagging it as consider_parallel won't change anything,
+                        * but that's true for plain tables, too.  We must set
+                        * consider_parallel based on the rel's own quals and targetlist,
+                        * so that if a subquery path is parallel-safe but the quals and
+                        * projection we're sticking onto it are not, we correctly mark
+                        * the SubqueryScanPath as not parallel-safe.  (Note that
+                        * set_subquery_pathlist() might push some of these quals down
+                        * into the subquery itself, but that doesn't change anything.)
+                        */
+                       break;
+               case RTE_JOIN:
+                       /* Shouldn't happen; we're only considering baserels here. */
+                       Assert(false);
+                       return;
+               case RTE_FUNCTION:
+                       /* Check for parallel-restricted functions. */
+                       if (!function_rte_parallel_ok(rte))
+                               return;
+                       break;
+               case RTE_VALUES:
+                       /*
+                        * The data for a VALUES clause is stored in the plan tree itself,
+                        * so scanning it in a worker is fine.
+                        */
+                       break;
+               case RTE_CTE:
+                       /*
+                        * CTE tuplestores aren't shared among parallel workers, so we
+                        * force all CTE scans to happen in the leader.  Also, populating
+                        * the CTE would require executing a subplan that's not available
+                        * in the worker, might be parallel-restricted, and must get
+                        * executed only once.
+                        */
+                       return;
+       }
+       /*
+        * If there's anything in baserestrictinfo that's parallel-restricted, we
+        * give up on parallelizing access to this relation.  We could consider
+        * instead postponing application of the restricted quals until we're
+        * above all the parallelism in the plan tree, but it's not clear that
+        * that would be a win in very many cases, and it might be tricky to make
+        * outer join clauses work correctly.  It would likely break equivalence
+        * classes, too.
+        */
+       if (has_parallel_hazard((Node *) rel->baserestrictinfo, false))
+               return;
+       /*
+        * Likewise, if the relation's outputs are not parallel-safe, give up.
+        * (Usually, they're just Vars, but sometimes they're not.)
+        */
+       if (has_parallel_hazard((Node *) rel->reltarget->exprs, false))
+               return;
+       /* We have a winner. */
+       rel->consider_parallel = true;
+ }
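Hedged aside (illustrative, not part of the diff above): the foreign-table check above relies on the optional IsForeignScanParallelSafe callback in FdwRoutine. A sketch of how a wrapper might supply it; the "demo_fdw" names are invented and every other required FDW callback is omitted:

    #include "postgres.h"

    #include "fmgr.h"
    #include "foreign/fdwapi.h"

    PG_FUNCTION_INFO_V1(demo_fdw_handler);

    static bool
    demo_fdw_IsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
    								   RangeTblEntry *rte)
    {
    	/*
    	 * Return true only when a worker can scan on its own, e.g. reading a
    	 * local read-only file.  An FDW that holds a single TCP connection in
    	 * the leader should return false, or simply leave the callback unset.
    	 */
    	return false;
    }

    Datum
    demo_fdw_handler(PG_FUNCTION_ARGS)
    {
    	FdwRoutine *routine = makeNode(FdwRoutine);

    	/* scan/size callbacks required by every FDW are omitted in this sketch */
    	routine->IsForeignScanParallelSafe = demo_fdw_IsForeignScanParallelSafe;

    	PG_RETURN_POINTER(routine);
    }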
+ /*
+  * Check whether a function RTE is scanning something parallel-restricted.
+  */
+ static bool
+ function_rte_parallel_ok(RangeTblEntry *rte)
+ {
+       ListCell   *lc;
+       foreach(lc, rte->functions)
+       {
+               RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
+               Assert(IsA(rtfunc, RangeTblFunction));
+               if (has_parallel_hazard(rtfunc->funcexpr, false))
+                       return false;
+       }
+       return true;
+ }
  /*
   * set_plain_rel_pathlist
   *      Build access paths for a plain relation (no subquery, no inheritance)
@@@ -1318,11 -1654,8 +1661,11 @@@ set_subquery_pathlist(PlannerInfo *root
        Relids          required_outer;
        pushdown_safety_info safetyInfo;
        double          tuple_fraction;
-       PlannerInfo *subroot;
-       List       *pathkeys;
 +#ifdef XCP
 +      Distribution *distribution;
 +#endif
+       RelOptInfo *sub_final_rel;
+       ListCell   *lc;
  
        /*
         * Must copy the Query so that planning doesn't mess up the RTE contents
                return;
        }
  
-       /* Mark rel with estimated output rows, width, etc */
+       /*
+        * Mark rel with estimated output rows, width, etc.  Note that we have to
+        * do this before generating outer-query paths, else cost_subqueryscan is
+        * not happy.
+        */
        set_subquery_size_estimates(root, rel);
  
-       /* Convert subquery pathkeys to outer representation */
-       pathkeys = convert_subquery_pathkeys(root, rel, subroot->query_pathkeys);
 +      /* Generate appropriate path */
 +#ifdef XCP
 +      if (subroot->distribution && subroot->distribution->distributionExpr)
 +      {
 +              ListCell *lc;
 +              /*
 +               * The distribution expression comes from the subplan's tlist, but it
 +               * should refer to the rel, so it needs conversion.
 +               */
 +              distribution = makeNode(Distribution);
 +              distribution->distributionType = subroot->distribution->distributionType;
 +              distribution->nodes = bms_copy(subroot->distribution->nodes);
 +              distribution->restrictNodes = bms_copy(subroot->distribution->restrictNodes);
 +              foreach(lc, rel->subplan->targetlist)
 +              {
 +                      TargetEntry *tle = (TargetEntry *) lfirst(lc);
 +                      if (equal(tle->expr, subroot->distribution->distributionExpr))
 +                      {
 +                              distribution->distributionExpr = (Node *)
 +                                              makeVarFromTargetEntry(rel->relid, tle);
 +                              break;
 +                      }
 +              }
 +      }
 +      else
 +              distribution = subroot->distribution;
 +      add_path(rel, create_subqueryscan_path(root, rel, pathkeys, required_outer,
 +                       distribution));
 +
 +      /*
 +       * Temporarily block ORDER BY in subqueries until we can add support for
 +       * it in Postgres-XL without producing incorrect results.  We should do
 +       * this only in normal processing mode, though!
 +       *
 +       * The extra conditions below try to handle cases where an ORDER BY
 +       * appears in a simple VIEW or INSERT SELECT.
 +       */
 +      if (IsUnderPostmaster &&
 +              list_length(subquery->sortClause) > 1
 +                              && (subroot->parent_root != root
 +                              || (subroot->parent_root == root 
 +                                      && (root->parse->commandType != CMD_SELECT
 +                                              || (root->parse->commandType == CMD_SELECT
 +                                                      && root->parse->hasWindowFuncs)))))
 +              elog(ERROR, "Postgres-XL does not currently support ORDER BY in subqueries");
 +#else
-       add_path(rel, create_subqueryscan_path(root, rel, pathkeys, required_outer));
- #endif
 +
+       /*
+        * For each Path that subquery_planner produced, make a SubqueryScanPath
+        * in the outer query.
+        */
+       foreach(lc, sub_final_rel->pathlist)
+       {
+               Path       *subpath = (Path *) lfirst(lc);
+               List       *pathkeys;
+               /* Convert subpath's pathkeys to outer representation */
+               pathkeys = convert_subquery_pathkeys(root,
+                                                                                        rel,
+                                                                                        subpath->pathkeys,
+                                                       make_tlist_from_pathtarget(subpath->pathtarget));
+               /* Generate outer path using this subpath */
+               add_path(rel, (Path *)
+                                create_subqueryscan_path(root, rel, subpath,
+                                                                                 pathkeys, required_outer));
+       }
++#endif
  }
  
  /*
index fd44c85bf9d4ad8032d238c8dcb31530cd314d2a,2a49639f1254a1e564169ff0cb4587a79b40f823..485717accea120654a9f56fa6d5c4c1d5e5f9e68
@@@ -57,8 -60,7 +60,8 @@@
   * values.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
@@@ -103,10 -106,8 +107,12 @@@ double           random_page_cost = DEFAULT_RAND
  double                cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
  double                cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
  double                cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
 +#ifdef XCP
 +double                network_byte_cost = DEFAULT_NETWORK_BYTE_COST;
 +double                remote_query_cost = DEFAULT_REMOTE_QUERY_COST;
 +#endif
+ double                parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
+ double                parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
  
  int                   effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
  
@@@ -2425,18 -2559,10 +2565,19 @@@ final_cost_mergejoin(PlannerInfo *root
         * off.
         */
        else if (enable_material && innersortkeys != NIL &&
-                        relation_byte_size(inner_path_rows, inner_path->parent->width) >
+                        relation_byte_size(inner_path_rows,
+                                                               inner_path->pathtarget->width) >
                         (work_mem * 1024L))
                path->materialize_inner = true;
 +#ifdef XCP
 +      /*
 +       * Even if innersortkeys are specified, we never add the Sort node on top
 +       * of a RemoteSubplan; instead we set up an internal sorter.
 +       * Since RemoteSubplan does not support mark/restore, we must materialize it.
 +       */
 +      else if (inner_path->pathtype == T_RemoteSubplan)
 +              path->materialize_inner = true;
 +#endif
        else
                path->materialize_inner = false;
  
index 42aceafcf2b838cfb9dd3a2b3fa4ddfef80680bd,54d601fc47d0ffbbf6bfc4d0cccb0e6660e39e65..775756f67d2ff0b159509f441b03e5b5c9a3543e
@@@ -5,8 -5,7 +5,8 @@@
   *      Planning is complete, we just need to convert the selected
   *      Path into a Plan.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -76,15 -81,38 +103,45 @@@ static Plan *create_join_plan(PlannerIn
  static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path);
  static Plan *create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path);
  static Result *create_result_plan(PlannerInfo *root, ResultPath *best_path);
- static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path);
- static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path);
 +#ifdef XCP
 +static void adjust_subplan_distribution(PlannerInfo *root, Distribution *pathd,
 +                                                Distribution *subd);
 +static RemoteSubplan *create_remotescan_plan(PlannerInfo *root,
 +                                         RemoteSubPath *best_path);
 +static char *get_internal_cursor(void);
 +#endif
+ static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
+                                        int flags);
+ static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
+                                  int flags);
+ static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
+ static Plan *create_projection_plan(PlannerInfo *root, ProjectionPath *best_path);
+ static Plan *inject_projection_plan(Plan *subplan, List *tlist);
+ static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
+ static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
+ static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path,
+                                                int flags);
+ static Agg *create_agg_plan(PlannerInfo *root, AggPath *best_path);
+ static Plan *create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path);
+ static Result *create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path);
+ static WindowAgg *create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path);
+ static SetOp *create_setop_plan(PlannerInfo *root, SetOpPath *best_path,
+                                 int flags);
+ static RecursiveUnion *create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path);
+ static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc,
+                                                  List *tlist,
+                                                  int numSortCols, AttrNumber *sortColIdx,
+                                                  int *partNumCols,
+                                                  AttrNumber **partColIdx,
+                                                  Oid **partOperators,
+                                                  int *ordNumCols,
+                                                  AttrNumber **ordColIdx,
+                                                  Oid **ordOperators);
+ static LockRows *create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
+                                        int flags);
+ static ModifyTable *create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path);
+ static Limit *create_limit_plan(PlannerInfo *root, LimitPath *best_path,
+                                 int flags);
  static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
                                        List *tlist, List *scan_clauses);
  static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
@@@ -204,13 -241,36 +270,41 @@@ static Plan *prepare_sort_from_pathkeys
  static EquivalenceMember *find_ec_member_for_tle(EquivalenceClass *ec,
                                           TargetEntry *tle,
                                           Relids relids);
+ static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys);
+ static Sort *make_sort_from_groupcols(List *groupcls,
+                                                AttrNumber *grpColIdx,
+                                                Plan *lefttree);
  static Material *make_material(Plan *lefttree);
+ static WindowAgg *make_windowagg(List *tlist, Index winref,
+                          int partNumCols, AttrNumber *partColIdx, Oid *partOperators,
+                          int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators,
+                          int frameOptions, Node *startOffset, Node *endOffset,
+                          Plan *lefttree);
+ static Group *make_group(List *tlist, List *qual, int numGroupCols,
+                  AttrNumber *grpColIdx, Oid *grpOperators,
+                  Plan *lefttree);
+ static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList);
+ static Unique *make_unique_from_pathkeys(Plan *lefttree,
+                                                 List *pathkeys, int numCols);
+ static Gather *make_gather(List *qptlist, List *qpqual,
+                       int nworkers, bool single_copy, Plan *subplan);
+ static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
+                  List *distinctList, AttrNumber flagColIdx, int firstFlag,
+                  long numGroups);
+ static LockRows *make_lockrows(Plan *lefttree, List *rowMarks, int epqParam);
+ static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
+ static ModifyTable *make_modifytable(PlannerInfo *root,
+                                CmdType operation, bool canSetTag,
+                                Index nominalRelation,
+                                List *resultRelations, List *subplans,
+                                List *withCheckOptionLists, List *returningLists,
+                                List *rowMarks, OnConflictExpr *onconflict, int epqParam);
  
 +#ifdef XCP
 +static int add_sort_column(AttrNumber colIdx, Oid sortOp, Oid coll,
 +                              bool nulls_first,int numCols, AttrNumber *sortColIdx,
 +                              Oid *sortOperators, Oid *collations, bool *nullsFirst);
 +#endif
  
  /*
   * create_plan
@@@ -238,14 -298,28 +332,33 @@@ create_plan(PlannerInfo *root, Path *be
        /* Initialize this module's private workspace in PlannerInfo */
        root->curOuterRels = NULL;
        root->curOuterParams = NIL;
 +#ifdef XCP
 +      root->curOuterRestrict = NULL;
 +      adjust_subplan_distribution(root, root->distribution,
 +                                                        best_path->distribution);
 +#endif
  
-       /* Recursively process the path tree */
-       plan = create_plan_recurse(root, best_path);
+       /* Recursively process the path tree, demanding the correct tlist result */
+       plan = create_plan_recurse(root, best_path, CP_EXACT_TLIST);
+       /*
+        * Make sure the topmost plan node's targetlist exposes the original
+        * column names and other decorative info.  Targetlists generated within
+        * the planner don't bother with that stuff, but we must have it on the
+        * top-level tlist seen at execution time.  However, ModifyTable plan
+        * nodes don't have a tlist matching the querytree targetlist.
+        */
+       if (!IsA(plan, ModifyTable))
+               apply_tlist_labeling(plan->targetlist, root->processed_tlist);
+       /*
+        * Attach any initPlans created in this query level to the topmost plan
+        * node.  (In principle the initplans could go in any plan node at or
+        * above where they're referenced, but there seems no reason to put them
+        * any lower than the topmost node for the query level.  Also, see
+        * comments for SS_finalize_plan before you try to change this.)
+        */
+       SS_attach_initplans(root, plan);
  
        /* Check we successfully assigned all NestLoopParams to plan nodes */
        if (root->curOuterParams != NIL)
@@@ -284,14 -358,8 +397,14 @@@ create_plan_recurse(PlannerInfo *root, 
                case T_WorkTableScan:
                case T_ForeignScan:
                case T_CustomScan:
-                       plan = create_scan_plan(root, best_path);
+                       plan = create_scan_plan(root, best_path, flags);
                        break;
 +#ifdef XCP
 +              case T_RemoteSubplan:
 +                      plan = (Plan *) create_remotescan_plan(root,
 +                                                                                                 (RemoteSubPath *) best_path);
 +                      break;
 +#endif
                case T_HashJoin:
                case T_MergeJoin:
                case T_NestLoop:
@@@ -1003,153 -1230,1018 +1275,1026 @@@ create_unique_plan(PlannerInfo *root, U
                tle = tlist_member(uniqexpr, newtlist);
                if (!tle)
                {
-                       tle = makeTargetEntry((Expr *) uniqexpr,
-                                                                 nextresno,
-                                                                 NULL,
-                                                                 false);
-                       newtlist = lappend(newtlist, tle);
-                       nextresno++;
-                       newitems = true;
+                       tle = makeTargetEntry((Expr *) uniqexpr,
+                                                                 nextresno,
+                                                                 NULL,
+                                                                 false);
+                       newtlist = lappend(newtlist, tle);
+                       nextresno++;
+                       newitems = true;
+               }
+       }
+       if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
+       {
+               /*
+                * If the top plan node can't do projections and its existing target
+                * list isn't already what we need, we need to add a Result node to
+                * help it along.
+                */
+               if (!is_projection_capable_plan(subplan) &&
+                       !tlist_same_exprs(newtlist, subplan->targetlist))
+                       subplan = inject_projection_plan(subplan, newtlist);
+               else
+                       subplan->targetlist = newtlist;
++#ifdef XCP
++              /*
++               * RemoteSubplan is conditionally projection-capable: it pushes the
++               * projection down to the data nodes.
++               */
++              if (IsA(subplan, RemoteSubplan))
++                      subplan->lefttree->targetlist = newtlist;
++#endif
+       }
+       /*
+        * Build control information showing which subplan output columns are to
+        * be examined by the grouping step.  Unfortunately we can't merge this
+        * with the previous loop, since we didn't then know which version of the
+        * subplan tlist we'd end up using.
+        */
+       newtlist = subplan->targetlist;
+       numGroupCols = list_length(uniq_exprs);
+       groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
+       groupColPos = 0;
+       foreach(l, uniq_exprs)
+       {
+               Node       *uniqexpr = lfirst(l);
+               TargetEntry *tle;
+               tle = tlist_member(uniqexpr, newtlist);
+               if (!tle)                               /* shouldn't happen */
+                       elog(ERROR, "failed to find unique expression in subplan tlist");
+               groupColIdx[groupColPos++] = tle->resno;
+       }
+       if (best_path->umethod == UNIQUE_PATH_HASH)
+       {
+               Oid                *groupOperators;
+               /*
+                * Get the hashable equality operators for the Agg node to use.
+                * Normally these are the same as the IN clause operators, but if
+                * those are cross-type operators then the equality operators are the
+                * ones for the IN clause operators' RHS datatype.
+                */
+               groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
+               groupColPos = 0;
+               foreach(l, in_operators)
+               {
+                       Oid                     in_oper = lfirst_oid(l);
+                       Oid                     eq_oper;
+                       if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
+                               elog(ERROR, "could not find compatible hash operator for operator %u",
+                                        in_oper);
+                       groupOperators[groupColPos++] = eq_oper;
+               }
+               /*
+                * Since the Agg node is going to project anyway, we can give it the
+                * minimum output tlist, without any stuff we might have added to the
+                * subplan tlist.
+                */
+               plan = (Plan *) make_agg(build_path_tlist(root, &best_path->path),
+                                                                NIL,
+                                                                AGG_HASHED,
+                                                                AGGSPLIT_SIMPLE,
+                                                                numGroupCols,
+                                                                groupColIdx,
+                                                                groupOperators,
+                                                                NIL,
+                                                                NIL,
+                                                                best_path->path.rows,
+                                                                subplan);
+       }
+       else
+       {
+               List       *sortList = NIL;
+               Sort       *sort;
+               /* Create an ORDER BY list to sort the input compatibly */
+               groupColPos = 0;
+               foreach(l, in_operators)
+               {
+                       Oid                     in_oper = lfirst_oid(l);
+                       Oid                     sortop;
+                       Oid                     eqop;
+                       TargetEntry *tle;
+                       SortGroupClause *sortcl;
+                       sortop = get_ordering_op_for_equality_op(in_oper, false);
+                       if (!OidIsValid(sortop))        /* shouldn't happen */
+                               elog(ERROR, "could not find ordering operator for equality operator %u",
+                                        in_oper);
+                       /*
+                        * The Unique node will need equality operators.  Normally these
+                        * are the same as the IN clause operators, but if those are
+                        * cross-type operators then the equality operators are the ones
+                        * for the IN clause operators' RHS datatype.
+                        */
+                       eqop = get_equality_op_for_ordering_op(sortop, NULL);
+                       if (!OidIsValid(eqop))          /* shouldn't happen */
+                               elog(ERROR, "could not find equality operator for ordering operator %u",
+                                        sortop);
+                       tle = get_tle_by_resno(subplan->targetlist,
+                                                                  groupColIdx[groupColPos]);
+                       Assert(tle != NULL);
+                       sortcl = makeNode(SortGroupClause);
+                       sortcl->tleSortGroupRef = assignSortGroupRef(tle,
+                                                                                                                subplan->targetlist);
+                       sortcl->eqop = eqop;
+                       sortcl->sortop = sortop;
+                       sortcl->nulls_first = false;
+                       sortcl->hashable = false;       /* no need to make this accurate */
+                       sortList = lappend(sortList, sortcl);
+                       groupColPos++;
+               }
+               sort = make_sort_from_sortclauses(sortList, subplan);
+               label_sort_with_costsize(root, sort, -1.0);
+               plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList);
+       }
+       /* Copy cost data from Path to Plan */
+       copy_generic_path_info(plan, &best_path->path);
+       return plan;
+ }
+ /*
+  * create_gather_plan
+  *
+  *      Create a Gather plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static Gather *
+ create_gather_plan(PlannerInfo *root, GatherPath *best_path)
+ {
+       Gather     *gather_plan;
+       Plan       *subplan;
+       List       *tlist;
+       /*
+        * Although the Gather node can project, we prefer to push down such work
+        * to its child node, so demand an exact tlist from the child.
+        */
+       subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
+       tlist = build_path_tlist(root, &best_path->path);
+       gather_plan = make_gather(tlist,
+                                                         NIL,
+                                                         best_path->path.parallel_workers,
+                                                         best_path->single_copy,
+                                                         subplan);
+       copy_generic_path_info(&gather_plan->plan, &best_path->path);
+       /* use parallel mode for parallel plans. */
+       root->glob->parallelModeNeeded = true;
+       return gather_plan;
+ }
+ /*
+  * create_projection_plan
+  *
+  *      Create a plan tree to do a projection step and (recursively) plans
+  *      for its subpaths.  We may need a Result node for the projection,
+  *      but sometimes we can just let the subplan do the work.
+  */
+ static Plan *
+ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path)
+ {
+       Plan       *plan;
+       Plan       *subplan;
+       List       *tlist;
+       /* Since we intend to project, we don't need to constrain child tlist */
+       subplan = create_plan_recurse(root, best_path->subpath, 0);
+       tlist = build_path_tlist(root, &best_path->path);
+       /*
+        * We might not really need a Result node here, either because the subplan
+        * can project or because it's returning the right list of expressions
+        * anyway.  Usually create_projection_path will have detected that and set
+        * dummypp if we don't need a Result; but its decision can't be final,
+        * because some createplan.c routines change the tlists of their nodes.
+        * (An example is that create_merge_append_plan might add resjunk sort
+        * columns to a MergeAppend.)  So we have to recheck here.  If we do
+        * arrive at a different answer than create_projection_path did, we'll
+        * have made slightly wrong cost estimates; but label the plan with the
+        * cost estimates we actually used, not "corrected" ones.  (XXX this could
+        * be cleaned up if we moved more of the sortcolumn setup logic into Path
+        * creation, but that would add expense to creating Paths we might end up
+        * not using.)
+        */
+       if (is_projection_capable_path(best_path->subpath) ||
+               tlist_same_exprs(tlist, subplan->targetlist))
+       {
+               /* Don't need a separate Result, just assign tlist to subplan */
+               plan = subplan;
+               plan->targetlist = tlist;
+               /* Label plan with the estimated costs we actually used */
+               plan->startup_cost = best_path->path.startup_cost;
+               plan->total_cost = best_path->path.total_cost;
+               plan->plan_rows = best_path->path.rows;
+               plan->plan_width = best_path->path.pathtarget->width;
+               /* ... but be careful not to munge subplan's parallel-aware flag */
+       }
+       else
+       {
+               /* We need a Result node */
+               plan = (Plan *) make_result(tlist, NULL, subplan);
+               copy_generic_path_info(plan, (Path *) best_path);
+       }
+       return plan;
+ }
+ /*
+  * inject_projection_plan
+  *      Insert a Result node to do a projection step.
+  *
+  * This is used in a few places where we decide on-the-fly that we need a
+  * projection step as part of the tree generated for some Path node.
+  * We should try to get rid of this in favor of doing it more honestly.
+  */
+ static Plan *
+ inject_projection_plan(Plan *subplan, List *tlist)
+ {
+       Plan       *plan;
+       plan = (Plan *) make_result(tlist, NULL, subplan);
+       /*
+        * In principle, we should charge tlist eval cost plus cpu_per_tuple per
+        * row for the Result node.  But the former has probably been factored in
+        * already and the latter was not accounted for during Path construction,
+        * so being formally correct might just make the EXPLAIN output look less
+        * consistent not more so.  Hence, just copy the subplan's cost.
+        */
+       copy_plan_costsize(plan, subplan);
+       return plan;
+ }
+ /*
+  * create_sort_plan
+  *
+  *      Create a Sort plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static Sort *
+ create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
+ {
+       Sort       *plan;
+       Plan       *subplan;
+       /*
+        * We don't want any excess columns in the sorted tuples, so request a
+        * smaller tlist.  Otherwise, since Sort doesn't project, tlist
+        * requirements pass through.
+        */
+       subplan = create_plan_recurse(root, best_path->subpath,
+                                                                 flags | CP_SMALL_TLIST);
+       plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys);
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
+       return plan;
+ }
+ /*
+  * create_group_plan
+  *
+  *      Create a Group plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static Group *
+ create_group_plan(PlannerInfo *root, GroupPath *best_path)
+ {
+       Group      *plan;
+       Plan       *subplan;
+       List       *tlist;
+       List       *quals;
+       /*
+        * Group can project, so no need to be terribly picky about child tlist,
+        * but we do need grouping columns to be available
+        */
+       subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+       tlist = build_path_tlist(root, &best_path->path);
+       quals = order_qual_clauses(root, best_path->qual);
+       plan = make_group(tlist,
+                                         quals,
+                                         list_length(best_path->groupClause),
+                                         extract_grouping_cols(best_path->groupClause,
+                                                                                       subplan->targetlist),
+                                         extract_grouping_ops(best_path->groupClause),
+                                         subplan);
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
+       return plan;
+ }
+ /*
+  * create_upper_unique_plan
+  *
+  *      Create a Unique plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static Unique *
+ create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, int flags)
+ {
+       Unique     *plan;
+       Plan       *subplan;
+       /*
+        * Unique doesn't project, so tlist requirements pass through; moreover we
+        * need grouping columns to be labeled.
+        */
+       subplan = create_plan_recurse(root, best_path->subpath,
+                                                                 flags | CP_LABEL_TLIST);
+       plan = make_unique_from_pathkeys(subplan,
+                                                                        best_path->path.pathkeys,
+                                                                        best_path->numkeys);
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
+       return plan;
+ }
+ /*
+  * create_agg_plan
+  *
+  *      Create an Agg plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static Agg *
+ create_agg_plan(PlannerInfo *root, AggPath *best_path)
+ {
+       Agg                *plan;
+       Plan       *subplan;
+       List       *tlist;
+       List       *quals;
+       /*
+        * Agg can project, so no need to be terribly picky about child tlist, but
+        * we do need grouping columns to be available
+        */
+       subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+       tlist = build_path_tlist(root, &best_path->path);
+       quals = order_qual_clauses(root, best_path->qual);
+       plan = make_agg(tlist, quals,
+                                       best_path->aggstrategy,
+                                       best_path->aggsplit,
+                                       list_length(best_path->groupClause),
+                                       extract_grouping_cols(best_path->groupClause,
+                                                                                 subplan->targetlist),
+                                       extract_grouping_ops(best_path->groupClause),
+                                       NIL,
+                                       NIL,
+                                       best_path->numGroups,
+                                       subplan);
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
+       return plan;
+ }
+ /*
+  * Given a groupclause for a collection of grouping sets, produce the
+  * corresponding groupColIdx.
+  *
+  * root->grouping_map maps the tleSortGroupRef to the actual column position in
+  * the input tuple. So we get the ref from the entries in the groupclause and
+  * look them up there.
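+  *
+  * For example, if the groupClause entries carry tleSortGroupRefs 3 and 1,
+  * and grouping_map[3] is 2 while grouping_map[1] is 5, the result is the
+  * column index array {2, 5}.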
+  */
+ static AttrNumber *
+ remap_groupColIdx(PlannerInfo *root, List *groupClause)
+ {
+       AttrNumber *grouping_map = root->grouping_map;
+       AttrNumber *new_grpColIdx;
+       ListCell   *lc;
+       int                     i;
+       Assert(grouping_map);
+       new_grpColIdx = palloc0(sizeof(AttrNumber) * list_length(groupClause));
+       i = 0;
+       foreach(lc, groupClause)
+       {
+               SortGroupClause *clause = lfirst(lc);
+               new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
+       }
+       return new_grpColIdx;
+ }
+ /*
+  * create_groupingsets_plan
+  *      Create a plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  *
+  *      What we emit is an Agg plan with some vestigial Agg and Sort nodes
+  *      hanging off the side.  The top Agg implements the last grouping set
+  *      specified in the GroupingSetsPath, and any additional grouping sets
+  *      each give rise to a subsidiary Agg and Sort node in the top Agg's
+  *      "chain" list.  These nodes don't participate in the plan directly,
+  *      but they are a convenient way to represent the required data for
+  *      the extra steps.
+  *
+  *      Returns a Plan node.
+  */
+ static Plan *
+ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
+ {
+       Agg                *plan;
+       Plan       *subplan;
+       List       *rollup_groupclauses = best_path->rollup_groupclauses;
+       List       *rollup_lists = best_path->rollup_lists;
+       AttrNumber *grouping_map;
+       int                     maxref;
+       List       *chain;
+       ListCell   *lc,
+                          *lc2;
+       /* Shouldn't get here without grouping sets */
+       Assert(root->parse->groupingSets);
+       Assert(rollup_lists != NIL);
+       Assert(list_length(rollup_lists) == list_length(rollup_groupclauses));
+       /*
+        * Agg can project, so no need to be terribly picky about child tlist, but
+        * we do need grouping columns to be available
+        */
+       subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+       /*
+        * Compute the mapping from tleSortGroupRef to column index in the child's
+        * tlist.  First, identify max SortGroupRef in groupClause, for array
+        * sizing.
+        */
+       maxref = 0;
+       foreach(lc, root->parse->groupClause)
+       {
+               SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
+               if (gc->tleSortGroupRef > maxref)
+                       maxref = gc->tleSortGroupRef;
+       }
+       grouping_map = (AttrNumber *) palloc0((maxref + 1) * sizeof(AttrNumber));
+       /* Now look up the column numbers in the child's tlist */
+       foreach(lc, root->parse->groupClause)
+       {
+               SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
+               TargetEntry *tle = get_sortgroupclause_tle(gc, subplan->targetlist);
+               grouping_map[gc->tleSortGroupRef] = tle->resno;
+       }
+       /*
+        * During setrefs.c, we'll need the grouping_map to fix up the cols lists
+        * in GroupingFunc nodes.  Save it for setrefs.c to use.
+        *
+        * This doesn't work if we're in an inheritance subtree (see notes in
+        * create_modifytable_plan).  Fortunately we can't be because there would
+        * never be grouping in an UPDATE/DELETE; but let's Assert that.
+        */
+       Assert(!root->hasInheritedTarget);
+       Assert(root->grouping_map == NULL);
+       root->grouping_map = grouping_map;
+       /*
+        * Generate the side nodes that describe the other sort and group
+        * operations besides the top one.  Note that we don't worry about putting
+        * accurate cost estimates in the side nodes; only the topmost Agg node's
+        * costs will be shown by EXPLAIN.
+        */
+       chain = NIL;
+       if (list_length(rollup_groupclauses) > 1)
+       {
+               forboth(lc, rollup_groupclauses, lc2, rollup_lists)
+               {
+                       List       *groupClause = (List *) lfirst(lc);
+                       List       *gsets = (List *) lfirst(lc2);
+                       AttrNumber *new_grpColIdx;
+                       Plan       *sort_plan;
+                       Plan       *agg_plan;
+                       /* We want to iterate over all but the last rollup list elements */
+                       if (lnext(lc) == NULL)
+                               break;
+                       new_grpColIdx = remap_groupColIdx(root, groupClause);
+                       sort_plan = (Plan *)
+                               make_sort_from_groupcols(groupClause,
+                                                                                new_grpColIdx,
+                                                                                subplan);
+                       agg_plan = (Plan *) make_agg(NIL,
+                                                                                NIL,
+                                                                                AGG_SORTED,
+                                                                                AGGSPLIT_SIMPLE,
+                                                                          list_length((List *) linitial(gsets)),
+                                                                                new_grpColIdx,
+                                                                                extract_grouping_ops(groupClause),
+                                                                                gsets,
+                                                                                NIL,
+                                                                                0,             /* numGroups not needed */
+                                                                                sort_plan);
+                       /*
+                        * Nuke stuff we don't need to avoid bloating debug output.
+                        */
+                       sort_plan->targetlist = NIL;
+                       sort_plan->lefttree = NULL;
+                       chain = lappend(chain, agg_plan);
+               }
+       }
+       /*
+        * Now make the final Agg node
+        */
+       {
+               List       *groupClause = (List *) llast(rollup_groupclauses);
+               List       *gsets = (List *) llast(rollup_lists);
+               AttrNumber *top_grpColIdx;
+               int                     numGroupCols;
+               top_grpColIdx = remap_groupColIdx(root, groupClause);
+               numGroupCols = list_length((List *) linitial(gsets));
+               plan = make_agg(build_path_tlist(root, &best_path->path),
+                                               best_path->qual,
+                                               (numGroupCols > 0) ? AGG_SORTED : AGG_PLAIN,
+                                               AGGSPLIT_SIMPLE,
+                                               numGroupCols,
+                                               top_grpColIdx,
+                                               extract_grouping_ops(groupClause),
+                                               gsets,
+                                               chain,
+                                               0,              /* numGroups not needed */
+                                               subplan);
+               /* Copy cost data from Path to Plan */
+               copy_generic_path_info(&plan->plan, &best_path->path);
+       }
+       return (Plan *) plan;
+ }
+ /*
+  * create_minmaxagg_plan
+  *
+  *      Create a Result plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static Result *
+ create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path)
+ {
+       Result     *plan;
+       List       *tlist;
+       ListCell   *lc;
+       /* Prepare an InitPlan for each aggregate's subquery. */
+       foreach(lc, best_path->mmaggregates)
+       {
+               MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
+               PlannerInfo *subroot = mminfo->subroot;
+               Query      *subparse = subroot->parse;
+               Plan       *plan;
+               /*
+                * Generate the plan for the subquery. We already have a Path, but we
+                * have to convert it to a Plan and attach a LIMIT node above it.
+                * Since we are entering a different planner context (subroot),
+                * recurse to create_plan not create_plan_recurse.
+                */
+               plan = create_plan(subroot, mminfo->path);
+               plan = (Plan *) make_limit(plan,
+                                                                  subparse->limitOffset,
+                                                                  subparse->limitCount);
+               /* Must apply correct cost/width data to Limit node */
+               plan->startup_cost = mminfo->path->startup_cost;
+               plan->total_cost = mminfo->pathcost;
+               plan->plan_rows = 1;
+               plan->plan_width = mminfo->path->pathtarget->width;
+               plan->parallel_aware = false;
+               /* Convert the plan into an InitPlan in the outer query. */
+               SS_make_initplan_from_plan(root, subroot, plan, mminfo->param);
+       }
+       /* Generate the output plan --- basically just a Result */
+       tlist = build_path_tlist(root, &best_path->path);
+       plan = make_result(tlist, (Node *) best_path->quals, NULL);
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
+       /*
+        * During setrefs.c, we'll need to replace references to the Agg nodes
+        * with InitPlan output params.  (We can't just do that locally in the
+        * MinMaxAgg node, because path nodes above here may have Agg references
+        * as well.)  Save the mmaggregates list to tell setrefs.c to do that.
+        *
+        * This doesn't work if we're in an inheritance subtree (see notes in
+        * create_modifytable_plan).  Fortunately we can't be because there would
+        * never be aggregates in an UPDATE/DELETE; but let's Assert that.
+        */
+       Assert(!root->hasInheritedTarget);
+       Assert(root->minmax_aggs == NIL);
+       root->minmax_aggs = best_path->mmaggregates;
+       return plan;
+ }
+ /*
+  * create_windowagg_plan
+  *
+  *      Create a WindowAgg plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static WindowAgg *
+ create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path)
+ {
+       WindowAgg  *plan;
+       WindowClause *wc = best_path->winclause;
+       Plan       *subplan;
+       List       *tlist;
+       int                     numsortkeys;
+       AttrNumber *sortColIdx;
+       Oid                *sortOperators;
+       Oid                *collations;
+       bool       *nullsFirst;
+       int                     partNumCols;
+       AttrNumber *partColIdx;
+       Oid                *partOperators;
+       int                     ordNumCols;
+       AttrNumber *ordColIdx;
+       Oid                *ordOperators;
+       /*
+        * WindowAgg can project, so no need to be terribly picky about child
+        * tlist, but we do need grouping columns to be available
+        */
+       subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+       tlist = build_path_tlist(root, &best_path->path);
+       /*
+        * We shouldn't need to actually sort, but it's convenient to use
+        * prepare_sort_from_pathkeys to identify the input's sort columns.
+        */
+       subplan = prepare_sort_from_pathkeys(subplan,
+                                                                                best_path->winpathkeys,
+                                                                                NULL,
+                                                                                NULL,
+                                                                                false,
+                                                                                &numsortkeys,
+                                                                                &sortColIdx,
+                                                                                &sortOperators,
+                                                                                &collations,
+                                                                                &nullsFirst);
+       /* Now deconstruct that into partition and ordering portions */
+       get_column_info_for_window(root,
+                                                          wc,
+                                                          subplan->targetlist,
+                                                          numsortkeys,
+                                                          sortColIdx,
+                                                          &partNumCols,
+                                                          &partColIdx,
+                                                          &partOperators,
+                                                          &ordNumCols,
+                                                          &ordColIdx,
+                                                          &ordOperators);
+       /* And finally we can make the WindowAgg node */
+       plan = make_windowagg(tlist,
+                                                 wc->winref,
+                                                 partNumCols,
+                                                 partColIdx,
+                                                 partOperators,
+                                                 ordNumCols,
+                                                 ordColIdx,
+                                                 ordOperators,
+                                                 wc->frameOptions,
+                                                 wc->startOffset,
+                                                 wc->endOffset,
+                                                 subplan);
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
+       return plan;
+ }
+ /*
+  * get_column_info_for_window
+  *            Get the partitioning/ordering column numbers and equality operators
+  *            for a WindowAgg node.
+  *
+  * This depends on the behavior of planner.c's make_pathkeys_for_window!
+  *
+  * We are given the target WindowClause and an array of the input column
+  * numbers associated with the resulting pathkeys.  In the easy case, there
+  * are the same number of pathkey columns as partitioning + ordering columns
+  * and we just have to copy some data around.  However, it's possible that
+  * some of the original partitioning + ordering columns were eliminated as
+  * redundant during the transformation to pathkeys.  (This can happen even
+  * though the parser gets rid of obvious duplicates.  A typical scenario is a
+  * window specification "PARTITION BY x ORDER BY y" coupled with a clause
+  * "WHERE x = y" that causes the two sort columns to be recognized as
+  * redundant.)        In that unusual case, we have to work a lot harder to
+  * determine which keys are significant.
+  *
+  * The method used here is a bit brute-force: add the sort columns to a list
+  * one at a time and note when the resulting pathkey list gets longer.  But
+  * it's a sufficiently uncommon case that a faster way doesn't seem worth
+  * the amount of code refactoring that'd be needed.
+  */
+ static void
+ get_column_info_for_window(PlannerInfo *root, WindowClause *wc, List *tlist,
+                                                  int numSortCols, AttrNumber *sortColIdx,
+                                                  int *partNumCols,
+                                                  AttrNumber **partColIdx,
+                                                  Oid **partOperators,
+                                                  int *ordNumCols,
+                                                  AttrNumber **ordColIdx,
+                                                  Oid **ordOperators)
+ {
+       int                     numPart = list_length(wc->partitionClause);
+       int                     numOrder = list_length(wc->orderClause);
+       if (numSortCols == numPart + numOrder)
+       {
+               /* easy case */
+               *partNumCols = numPart;
+               *partColIdx = sortColIdx;
+               *partOperators = extract_grouping_ops(wc->partitionClause);
+               *ordNumCols = numOrder;
+               *ordColIdx = sortColIdx + numPart;
+               *ordOperators = extract_grouping_ops(wc->orderClause);
+       }
+       else
+       {
+               List       *sortclauses;
+               List       *pathkeys;
+               int                     scidx;
+               ListCell   *lc;
+               /* first, allocate what's certainly enough space for the arrays */
+               *partNumCols = 0;
+               *partColIdx = (AttrNumber *) palloc(numPart * sizeof(AttrNumber));
+               *partOperators = (Oid *) palloc(numPart * sizeof(Oid));
+               *ordNumCols = 0;
+               *ordColIdx = (AttrNumber *) palloc(numOrder * sizeof(AttrNumber));
+               *ordOperators = (Oid *) palloc(numOrder * sizeof(Oid));
+               sortclauses = NIL;
+               pathkeys = NIL;
+               scidx = 0;
+               foreach(lc, wc->partitionClause)
+               {
+                       SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
+                       List       *new_pathkeys;
+                       sortclauses = lappend(sortclauses, sgc);
+                       new_pathkeys = make_pathkeys_for_sortclauses(root,
+                                                                                                                sortclauses,
+                                                                                                                tlist);
+                       if (list_length(new_pathkeys) > list_length(pathkeys))
+                       {
+                               /* this sort clause is actually significant */
+                               (*partColIdx)[*partNumCols] = sortColIdx[scidx++];
+                               (*partOperators)[*partNumCols] = sgc->eqop;
+                               (*partNumCols)++;
+                               pathkeys = new_pathkeys;
+                       }
+               }
+               foreach(lc, wc->orderClause)
+               {
+                       SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
+                       List       *new_pathkeys;
+                       sortclauses = lappend(sortclauses, sgc);
+                       new_pathkeys = make_pathkeys_for_sortclauses(root,
+                                                                                                                sortclauses,
+                                                                                                                tlist);
+                       if (list_length(new_pathkeys) > list_length(pathkeys))
+                       {
+                               /* this sort clause is actually significant */
+                               (*ordColIdx)[*ordNumCols] = sortColIdx[scidx++];
+                               (*ordOperators)[*ordNumCols] = sgc->eqop;
+                               (*ordNumCols)++;
+                               pathkeys = new_pathkeys;
+                       }
                }
+               /* complain if we didn't eat exactly the right number of sort cols */
+               if (scidx != numSortCols)
+                       elog(ERROR, "failed to deconstruct sort operators into partitioning/ordering operators");
        }
+ }
  
-       if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
-       {
-               /*
-                * If the top plan node can't do projections and its existing target
-                * list isn't already what we need, we need to add a Result node to
-                * help it along.
-                */
-               if (!is_projection_capable_plan(subplan) &&
-                       !tlist_same_exprs(newtlist, subplan->targetlist))
-                       subplan = (Plan *) make_result(root, newtlist, NULL, subplan);
-               else
-                       subplan->targetlist = newtlist;
- #ifdef XCP
-               /*
-                * RemoteSubplan is conditionally projection capable - it is pushing
-                * projection to the data nodes
-                */
-               if (IsA(subplan, RemoteSubplan))
-                       subplan->lefttree->targetlist = newtlist;
- #endif
-       }
+ /*
+  * create_setop_plan
+  *
+  *      Create a SetOp plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static SetOp *
+ create_setop_plan(PlannerInfo *root, SetOpPath *best_path, int flags)
+ {
+       SetOp      *plan;
+       Plan       *subplan;
+       long            numGroups;
  
        /*
-        * Build control information showing which subplan output columns are to
-        * be examined by the grouping step.  Unfortunately we can't merge this
-        * with the previous loop, since we didn't then know which version of the
-        * subplan tlist we'd end up using.
+        * SetOp doesn't project, so tlist requirements pass through; moreover we
+        * need grouping columns to be labeled.
         */
-       newtlist = subplan->targetlist;
-       numGroupCols = list_length(uniq_exprs);
-       groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
+       subplan = create_plan_recurse(root, best_path->subpath,
+                                                                 flags | CP_LABEL_TLIST);
  
-       groupColPos = 0;
-       foreach(l, uniq_exprs)
-       {
-               Node       *uniqexpr = lfirst(l);
-               TargetEntry *tle;
+       /* Convert numGroups to long int --- but 'ware overflow! */
+       numGroups = (long) Min(best_path->numGroups, (double) LONG_MAX);
  
-               tle = tlist_member(uniqexpr, newtlist);
-               if (!tle)                               /* shouldn't happen */
-                       elog(ERROR, "failed to find unique expression in subplan tlist");
-               groupColIdx[groupColPos++] = tle->resno;
-       }
+       plan = make_setop(best_path->cmd,
+                                         best_path->strategy,
+                                         subplan,
+                                         best_path->distinctList,
+                                         best_path->flagColIdx,
+                                         best_path->firstFlag,
+                                         numGroups);
  
-       if (best_path->umethod == UNIQUE_PATH_HASH)
-       {
-               long            numGroups;
-               Oid                *groupOperators;
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
  
-               numGroups = (long) Min(best_path->path.rows, (double) LONG_MAX);
+       return plan;
+ }
  
-               /*
-                * Get the hashable equality operators for the Agg node to use.
-                * Normally these are the same as the IN clause operators, but if
-                * those are cross-type operators then the equality operators are the
-                * ones for the IN clause operators' RHS datatype.
-                */
-               groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
-               groupColPos = 0;
-               foreach(l, in_operators)
-               {
-                       Oid                     in_oper = lfirst_oid(l);
-                       Oid                     eq_oper;
+ /*
+  * create_recursiveunion_plan
+  *
+  *      Create a RecursiveUnion plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static RecursiveUnion *
+ create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path)
+ {
+       RecursiveUnion *plan;
+       Plan       *leftplan;
+       Plan       *rightplan;
+       List       *tlist;
+       long            numGroups;
  
-                       if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
-                               elog(ERROR, "could not find compatible hash operator for operator %u",
-                                        in_oper);
-                       groupOperators[groupColPos++] = eq_oper;
-               }
+       /* Need both children to produce same tlist, so force it */
+       leftplan = create_plan_recurse(root, best_path->leftpath, CP_EXACT_TLIST);
+       rightplan = create_plan_recurse(root, best_path->rightpath, CP_EXACT_TLIST);
+       tlist = build_path_tlist(root, &best_path->path);
+       /* Convert numGroups to long int --- but 'ware overflow! */
+       numGroups = (long) Min(best_path->numGroups, (double) LONG_MAX);
+       plan = make_recursive_union(tlist,
+                                                               leftplan,
+                                                               rightplan,
+                                                               best_path->wtParam,
+                                                               best_path->distinctList,
+                                                               numGroups);
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
+       return plan;
+ }
+ /*
+  * create_lockrows_plan
+  *
+  *      Create a LockRows plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static LockRows *
+ create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
+                                        int flags)
+ {
+       LockRows   *plan;
+       Plan       *subplan;
+       /* LockRows doesn't project, so tlist requirements pass through */
+       subplan = create_plan_recurse(root, best_path->subpath, flags);
+       plan = make_lockrows(subplan, best_path->rowMarks, best_path->epqParam);
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
+       return plan;
+ }
+ /*
+  * create_modifytable_plan
+  *      Create a ModifyTable plan for 'best_path'.
+  *
+  *      Returns a Plan node.
+  */
+ static ModifyTable *
+ create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path)
+ {
+       ModifyTable *plan;
+       List       *subplans = NIL;
+       ListCell   *subpaths,
+                          *subroots;
+       /* Build the plan for each input path */
+       forboth(subpaths, best_path->subpaths,
+                       subroots, best_path->subroots)
+       {
+               Path       *subpath = (Path *) lfirst(subpaths);
+               PlannerInfo *subroot = (PlannerInfo *) lfirst(subroots);
+               Plan       *subplan;
  
                /*
-                * Since the Agg node is going to project anyway, we can give it the
-                * minimum output tlist, without any stuff we might have added to the
-                * subplan tlist.
+                * In an inherited UPDATE/DELETE, reference the per-child modified
+                * subroot while creating Plans from Paths for the child rel.  This is
+                * a kluge, but otherwise it's too hard to ensure that Plan creation
+                * functions (particularly in FDWs) don't depend on the contents of
+                * "root" matching what they saw at Path creation time.  The main
+                * downside is that creation functions for Plans that might appear
+                * below a ModifyTable cannot expect to modify the contents of "root"
+                * and have it "stick" for subsequent processing such as setrefs.c.
+                * That's not great, but it seems better than the alternative.
                 */
-               plan = (Plan *) make_agg(root,
-                                                                build_path_tlist(root, &best_path->path),
-                                                                NIL,
-                                                                AGG_HASHED,
-                                                                NULL,
-                                                                numGroupCols,
-                                                                groupColIdx,
-                                                                groupOperators,
-                                                                NIL,
-                                                                numGroups,
-                                                                subplan);
+               subplan = create_plan_recurse(subroot, subpath, CP_EXACT_TLIST);
+               /* Transfer resname/resjunk labeling, too, to keep executor happy */
+               apply_tlist_labeling(subplan->targetlist, subroot->processed_tlist);
+               subplans = lappend(subplans, subplan);
        }
-       else
-       {
-               List       *sortList = NIL;
  
-               /* Create an ORDER BY list to sort the input compatibly */
-               groupColPos = 0;
-               foreach(l, in_operators)
-               {
-                       Oid                     in_oper = lfirst_oid(l);
-                       Oid                     sortop;
-                       Oid                     eqop;
-                       TargetEntry *tle;
-                       SortGroupClause *sortcl;
+       plan = make_modifytable(root,
+                                                       best_path->operation,
+                                                       best_path->canSetTag,
+                                                       best_path->nominalRelation,
+                                                       best_path->resultRelations,
+                                                       subplans,
+                                                       best_path->withCheckOptionLists,
+                                                       best_path->returningLists,
+                                                       best_path->rowMarks,
+                                                       best_path->onconflict,
+                                                       best_path->epqParam);
  
-                       sortop = get_ordering_op_for_equality_op(in_oper, false);
-                       if (!OidIsValid(sortop))        /* shouldn't happen */
-                               elog(ERROR, "could not find ordering operator for equality operator %u",
-                                        in_oper);
+       copy_generic_path_info(&plan->plan, &best_path->path);
  
-                       /*
-                        * The Unique node will need equality operators.  Normally these
-                        * are the same as the IN clause operators, but if those are
-                        * cross-type operators then the equality operators are the ones
-                        * for the IN clause operators' RHS datatype.
-                        */
-                       eqop = get_equality_op_for_ordering_op(sortop, NULL);
-                       if (!OidIsValid(eqop))          /* shouldn't happen */
-                               elog(ERROR, "could not find equality operator for ordering operator %u",
-                                        sortop);
+       return plan;
+ }
  
-                       tle = get_tle_by_resno(subplan->targetlist,
-                                                                  groupColIdx[groupColPos]);
-                       Assert(tle != NULL);
+ /*
+  * create_limit_plan
+  *
+  *      Create a Limit plan for 'best_path' and (recursively) plans
+  *      for its subpaths.
+  */
+ static Limit *
+ create_limit_plan(PlannerInfo *root, LimitPath *best_path, int flags)
+ {
+       Limit      *plan;
+       Plan       *subplan;
  
-                       sortcl = makeNode(SortGroupClause);
-                       sortcl->tleSortGroupRef = assignSortGroupRef(tle,
-                                                                                                                subplan->targetlist);
-                       sortcl->eqop = eqop;
-                       sortcl->sortop = sortop;
-                       sortcl->nulls_first = false;
-                       sortcl->hashable = false;       /* no need to make this accurate */
-                       sortList = lappend(sortList, sortcl);
-                       groupColPos++;
-               }
-               plan = (Plan *) make_sort_from_sortclauses(root, sortList, subplan);
-               plan = (Plan *) make_unique(plan, sortList);
-       }
+       /* Limit doesn't project, so tlist requirements pass through */
+       subplan = create_plan_recurse(root, best_path->subpath, flags);
+       plan = make_limit(subplan,
+                                         best_path->limitOffset,
+                                         best_path->limitCount);
  
-       /* Adjust output size estimate (other fields should be OK already) */
-       plan->plan_rows = best_path->path.rows;
+       copy_generic_path_info(&plan->plan, (Path *) best_path);
  
        return plan;
  }
@@@ -4314,10 -4905,11 +5517,12 @@@ make_foreignscan(List *qptlist
                                 Index scanrelid,
                                 List *fdw_exprs,
                                 List *fdw_private,
-                                List *fdw_scan_tlist)
+                                List *fdw_scan_tlist,
+                                List *fdw_recheck_quals,
+                                Plan *outer_plan)
  {
        ForeignScan *node = makeNode(ForeignScan);
 +
        Plan       *plan = &node->scan.plan;
  
        /* cost will be filled in by create_foreignscan_plan */
@@@ -4387,12 -4948,8 +5561,12 @@@ make_append(List *appendplans, List *tl
        return node;
  }
  
- RecursiveUnion *
static RecursiveUnion *
 -make_recursive_union(List *tlist,
 +make_recursive_union(
 +#ifdef XCP
 +                                       PlannerInfo *root,
 +#endif
 +                                       List *tlist,
                                         Plan *lefttree,
                                         Plan *righttree,
                                         int wtParam,
        RecursiveUnion *node = makeNode(RecursiveUnion);
        Plan       *plan = &node->plan;
        int                     numCols = list_length(distinctList);
 +#ifdef XCP
 +      RemoteSubplan *left_pushdown, *right_pushdown;
 +#endif
  
-       cost_recursive_union(plan, lefttree, righttree);
        plan->targetlist = tlist;
        plan->qual = NIL;
        plan->lefttree = lefttree;
@@@ -4645,21 -5145,7 +5803,10 @@@ make_sort(Plan *lefttree, int numCols
  {
        Sort       *node = makeNode(Sort);
        Plan       *plan = &node->plan;
-       Path            sort_path;              /* dummy for result of cost_sort */
 +#ifdef XCP
 +      RemoteSubplan *pushdown;
 +#endif
  
-       copy_plan_costsize(plan, lefttree); /* only care about copying size */
-       cost_sort(&sort_path, root, NIL,
-                         lefttree->total_cost,
-                         lefttree->plan_rows,
-                         lefttree->plan_width,
-                         0.0,
-                         work_mem,
-                         limit_tuples);
-       plan->startup_cost = sort_path.startup_cost;
-       plan->total_cost = sort_path.total_cost;
        plan->targetlist = lefttree->targetlist;
        plan->qual = NIL;
        plan->lefttree = lefttree;
@@@ -5303,160 -5644,22 +6441,162 @@@ materialize_finished_plan(Plan *subplan
        return matplan;
  }
  
 +
 +#ifdef XCP
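 +/*
 + * Context for find_referenced_cols_walker: subtlist is the target list
 + * being examined; newtlist accumulates the referenced columns and
 + * phase-1 aggregates found by the walker.
 + */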
 +typedef struct
 +{
 +      List       *subtlist;
 +      List       *newtlist;
 +} find_referenced_cols_context;
 +
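 +/*
 + * find_referenced_cols_walker
 + *    Walk an expression tree, adding each referenced subplan column or
 + *    aggregate to context->newtlist (as a junk entry when a Var is not
 + *    found in context->subtlist).  Return true to abort the walk if the
 + *    expression cannot be pushed down to the data nodes: aggregates with
 + *    DISTINCT or ORDER BY, or aggregates lacking a collection function
 + *    for two-phase execution.
 + */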
 +static bool
 +find_referenced_cols_walker(Node *node, find_referenced_cols_context *context)
 +{
 +      TargetEntry *tle;
 +
 +      if (node == NULL)
 +              return false;
 +      if (IsA(node, Aggref))
 +      {
 +              /*
 +               * We cannot push down aggregates with DISTINCT.
 +               */
 +              if (((Aggref *) node)->aggdistinct)
 +                      return true;
 +
 +              /*
 +               * We cannot push down aggregates with ORDER BY.
 +               */
 +              if (((Aggref *) node)->aggorder)
 +                      return true;
 +
 +              /*
 +               * We need to add an aggregate reference to the new tlist if it
 +               * is not already there.  A phase-1 aggregate actually returns
 +               * values of the transition data type, so we should change the
 +               * data type of the expression.
 +               */
 +              if (!tlist_member(node, context->newtlist))
 +              {
 +                      Aggref *aggref = (Aggref *) node;
 +                      Aggref *newagg;
 +                      TargetEntry *newtle;
 +                      HeapTuple       aggTuple;
 +                      Form_pg_aggregate aggform;
 +                      Oid     aggtranstype;
 +                      Oid     aggcollecttype;
 +
 +                      aggTuple = SearchSysCache1(AGGFNOID,
 +                                                                         ObjectIdGetDatum(aggref->aggfnoid));
 +                      if (!HeapTupleIsValid(aggTuple))
 +                              elog(ERROR, "cache lookup failed for aggregate %u",
 +                                       aggref->aggfnoid);
 +                      aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
 +                      aggtranstype = aggform->aggtranstype;
 +                      aggcollecttype = aggform->aggcollecttype;
 +                      ReleaseSysCache(aggTuple);
 +
 +                      /* Cannot split this aggregate into two phases */
 +                      if (!OidIsValid(aggcollecttype))
 +                              return true;
 +
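 +                      /*
 +                       * If the transition type is polymorphic, resolve it from the
 +                       * aggregate's actual input types so that the phase-1 Aggref
 +                       * built below carries a concrete result type.
 +                       */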
 +                      if (IsPolymorphicType(aggtranstype))
 +                      {
 +                              Oid        *inputTypes;
 +                              Oid                *declaredArgTypes;
 +                              int                     agg_nargs;
 +                              int                     numArgs;
 +                              ListCell   *l;
 +
 +                              inputTypes = (Oid *) palloc(sizeof(Oid) * list_length(aggref->args));
 +                              numArgs = 0;
 +                              foreach(l, aggref->args)
 +                              {
 +                                      TargetEntry *tle = (TargetEntry *) lfirst(l);
 +
 +                                      if (!tle->resjunk)
 +                                              inputTypes[numArgs++] = exprType((Node *) tle->expr);
 +                              }
 +
 +                              /* have to fetch the agg's declared input types... */
 +                              (void) get_func_signature(aggref->aggfnoid,
 +                                                                                &declaredArgTypes, &agg_nargs);
 +                              Assert(agg_nargs == numArgs);
 +
 +                              aggtranstype = enforce_generic_type_consistency(inputTypes,
 +                                                                                                                              declaredArgTypes,
 +                                                                                                                              agg_nargs,
 +                                                                                                                              aggtranstype,
 +                                                                                                                              false);
 +                              pfree(inputTypes);
 +                              pfree(declaredArgTypes);
 +                      }
 +                      newagg = copyObject(aggref);
 +                      newagg->aggtype = aggtranstype;
 +
 +                      newtle = makeTargetEntry((Expr *) newagg,
 +                                                                       list_length(context->newtlist) + 1,
 +                                                                       NULL,
 +                                                                       false);
 +                      context->newtlist = lappend(context->newtlist, newtle);
 +              }
 +
 +              return false;
 +      }
 +      /*
 +       * If the expression is in the subtlist, copy it into the new tlist.
 +       */
 +      tle = tlist_member(node, context->subtlist);
 +      if (tle && !tlist_member((Node *) tle->expr, context->newtlist))
 +      {
 +              TargetEntry *newtle;
 +              newtle = makeTargetEntry((Expr *) copyObject(node),
 +                                                               list_length(context->newtlist) + 1,
 +                                                               tle->resname,
 +                                                               false);
 +              context->newtlist = lappend(context->newtlist, newtle);
 +              return false;
 +      }
 +      if (IsA(node, Var))
 +      {
 +              /*
 +               * The referenced Var is not a member of the subtlist.
 +               * Go ahead and add a junk entry for it.
 +               */
 +              TargetEntry *newtle;
 +              newtle = makeTargetEntry((Expr *) copyObject(node),
 +                                                               list_length(context->newtlist) + 1,
 +                                                               NULL,
 +                                                               true);
 +              context->newtlist = lappend(context->newtlist, newtle);
 +              return false;
 +      }
 +      return expression_tree_walker(node, find_referenced_cols_walker,
 +                                                                (void *) context);
 +}
 +#endif
 +
 +
  Agg *
- make_agg(PlannerInfo *root, List *tlist, List *qual,
-                AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
+ make_agg(List *tlist, List *qual,
+                AggStrategy aggstrategy, AggSplit aggsplit,
                 int numGroupCols, AttrNumber *grpColIdx, Oid *grpOperators,
-                List *groupingSets,
-                long numGroups,
-                Plan *lefttree)
+                List *groupingSets, List *chain,
+                double dNumGroups, Plan *lefttree)
  {
        Agg                *node = makeNode(Agg);
        Plan       *plan = &node->plan;
-       Path            agg_path;               /* dummy for result of cost_agg */
-       QualCost        qual_cost;
 +#ifdef XCP
 +      RemoteSubplan *pushdown;
 +#endif
+       long            numGroups;
+       /* Reduce to long, but 'ware overflow! */
+       numGroups = (long) Min(dNumGroups, (double) LONG_MAX);
  
        node->aggstrategy = aggstrategy;
+       node->aggsplit = aggsplit;
        node->numCols = numGroupCols;
        node->grpColIdx = grpColIdx;
        node->grpOperators = grpOperators;
@@@ -5769,25 -5743,7 +6815,10 @@@ make_unique_from_sortclauses(Plan *left
        AttrNumber *uniqColIdx;
        Oid                *uniqOperators;
        ListCell   *slitem;
 +#ifdef XCP
 +      RemoteSubplan *pushdown;
 +#endif
  
-       copy_plan_costsize(plan, lefttree);
-       /*
-        * Charge one cpu_operator_cost per comparison per input tuple. We assume
-        * all columns get compared at most of the tuples.  (XXX probably this is
-        * an overestimate.)
-        */
-       plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
-       /*
-        * plan->plan_rows is left as a copy of the input subplan's plan_rows; ie,
-        * we assume the filter removes nothing.  The caller must alter this if he
-        * has a better idea.
-        */
        plan->targetlist = lefttree->targetlist;
        plan->qual = NIL;
        plan->lefttree = lefttree;
@@@ -5945,59 -5985,7 +7084,10 @@@ make_limit(Plan *lefttree, Node *limitO
  {
        Limit      *node = makeNode(Limit);
        Plan       *plan = &node->plan;
 +#ifdef XCP
 +      RemoteSubplan *pushdown;
 +#endif
  
-       copy_plan_costsize(plan, lefttree);
-       /*
-        * Adjust the output rows count and costs according to the offset/limit.
-        * This is only a cosmetic issue if we are at top level, but if we are
-        * building a subquery then it's important to report correct info to the
-        * outer planner.
-        *
-        * When the offset or count couldn't be estimated, use 10% of the
-        * estimated number of rows emitted from the subplan.
-        */
-       if (offset_est != 0)
-       {
-               double          offset_rows;
-               if (offset_est > 0)
-                       offset_rows = (double) offset_est;
-               else
-                       offset_rows = clamp_row_est(lefttree->plan_rows * 0.10);
-               if (offset_rows > plan->plan_rows)
-                       offset_rows = plan->plan_rows;
-               if (plan->plan_rows > 0)
-                       plan->startup_cost +=
-                               (plan->total_cost - plan->startup_cost)
-                               * offset_rows / plan->plan_rows;
-               plan->plan_rows -= offset_rows;
-               if (plan->plan_rows < 1)
-                       plan->plan_rows = 1;
-       }
-       if (count_est != 0)
-       {
-               double          count_rows;
-               if (count_est > 0)
-                       count_rows = (double) count_est;
-               else
-                       count_rows = clamp_row_est(lefttree->plan_rows * 0.10);
-               if (count_rows > plan->plan_rows)
-                       count_rows = plan->plan_rows;
-               if (plan->plan_rows > 0)
-                       plan->total_cost = plan->startup_cost +
-                               (plan->total_cost - plan->startup_cost)
-                               * count_rows / plan->plan_rows;
-               plan->plan_rows = count_rows;
-               if (plan->plan_rows < 1)
-                       plan->plan_rows = 1;
-       }
        plan->targetlist = lefttree->targetlist;
        plan->qual = NIL;
        plan->lefttree = lefttree;
index 1cd25327b9e635df5314cb6eccd0708b44e0e2d9,805aae7ee7a79b3486b74bbe1f866fc112c7a617..f7d6dace59deacefdb925305b708d1577d5ff8cc
@@@ -17,8 -17,7 +17,8 @@@
   * scan all the rows anyway.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index f7540946ee98203bbaa747ac80e115d814ab8a3d,b265628325146d71d4e4e8f71ce84e37614d9f52..9d18de225ecd9292f40d6b73d4a9283f59aaebaa
@@@ -3,8 -3,7 +3,8 @@@
   * planner.c
   *      The query optimizer external interface.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  #include "parser/parsetree.h"
  #include "parser/parse_agg.h"
  #include "rewrite/rewriteManip.h"
+ #include "storage/dsm_impl.h"
  #include "utils/rel.h"
 +#ifdef PGXC
 +#include "commands/prepare.h"
 +#include "pgxc/pgxc.h"
 +#include "pgxc/planner.h"
 +#endif
  #include "utils/selfuncs.h"
+ #include "utils/lsyscache.h"
+ #include "utils/syscache.h"
  
  
- /* GUC parameter */
+ /* GUC parameters */
  double                cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
+ int                   force_parallel_mode = FORCE_PARALLEL_OFF;
  
  /* Hook for plugins to get control in planner() */
  planner_hook_type planner_hook = NULL;
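For context on the hook declared just above: an extension takes control of planning by saving the previous hook value and chaining to it (or to standard_planner()) from its own callback. A minimal sketch, assuming a hypothetical module callback called my_planner; planner_hook, planner_hook_type and standard_planner() are the real symbols from this file and its header:

#include "postgres.h"
#include "fmgr.h"
#include "nodes/params.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

/* Delegate to the previous hook if any, else to the core planner. */
static PlannedStmt *
my_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
{
	if (prev_planner_hook)
		return prev_planner_hook(parse, cursorOptions, boundParams);
	return standard_planner(parse, cursorOptions, boundParams);
}

void
_PG_init(void)
{
	prev_planner_hook = planner_hook;
	planner_hook = my_planner;
}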
@@@ -94,57 -104,55 +110,62 @@@ static List *preprocess_groupclause(Pla
  static List *extract_rollup_sets(List *groupingSets);
  static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
  static void standard_qp_callback(PlannerInfo *root, void *extra);
- static bool choose_hashed_grouping(PlannerInfo *root,
-                                          double tuple_fraction, double limit_tuples,
-                                          double path_rows, int path_width,
-                                          Path *cheapest_path, Path *sorted_path,
-                                          double dNumGroups, AggClauseCosts *agg_costs);
- static bool choose_hashed_distinct(PlannerInfo *root,
-                                          double tuple_fraction, double limit_tuples,
-                                          double path_rows, int path_width,
-                                          Cost cheapest_startup_cost, Cost cheapest_total_cost,
-                                          Cost sorted_startup_cost, Cost sorted_total_cost,
-                                          List *sorted_pathkeys,
-                                          double dNumDistinctRows);
- static List *make_subplanTargetList(PlannerInfo *root, List *tlist,
-                                          AttrNumber **groupColIdx, bool *need_tlist_eval);
- static int    get_grouping_column_index(Query *parse, TargetEntry *tle);
- static void locate_grouping_columns(PlannerInfo *root,
-                                               List *tlist,
-                                               List *sub_tlist,
-                                               AttrNumber *groupColIdx);
+ static double get_number_of_groups(PlannerInfo *root,
+                                        double path_rows,
+                                        List *rollup_lists,
+                                        List *rollup_groupclauses);
+ static Size estimate_hashagg_tablesize(Path *path,
+                                                  const AggClauseCosts *agg_costs,
+                                                  double dNumGroups);
+ static RelOptInfo *create_grouping_paths(PlannerInfo *root,
+                                         RelOptInfo *input_rel,
+                                         PathTarget *target,
+                                         const AggClauseCosts *agg_costs,
+                                         List *rollup_lists,
+                                         List *rollup_groupclauses);
+ static RelOptInfo *create_window_paths(PlannerInfo *root,
+                                       RelOptInfo *input_rel,
+                                       PathTarget *input_target,
+                                       PathTarget *output_target,
+                                       List *tlist,
+                                       WindowFuncLists *wflists,
+                                       List *activeWindows);
+ static void create_one_window_path(PlannerInfo *root,
+                                          RelOptInfo *window_rel,
+                                          Path *path,
+                                          PathTarget *input_target,
+                                          PathTarget *output_target,
+                                          List *tlist,
+                                          WindowFuncLists *wflists,
+                                          List *activeWindows);
+ static RelOptInfo *create_distinct_paths(PlannerInfo *root,
+                                         RelOptInfo *input_rel);
+ static RelOptInfo *create_ordered_paths(PlannerInfo *root,
+                                        RelOptInfo *input_rel,
+                                        PathTarget *target,
+                                        double limit_tuples);
+ static PathTarget *make_group_input_target(PlannerInfo *root,
+                                               PathTarget *final_target);
+ static PathTarget *make_partial_grouping_target(PlannerInfo *root,
+                                                        PathTarget *grouping_target);
  static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
  static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
- static List *make_windowInputTargetList(PlannerInfo *root,
-                                                  List *tlist, List *activeWindows);
+ static PathTarget *make_window_input_target(PlannerInfo *root,
+                                                PathTarget *final_target,
+                                                List *activeWindows);
  static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
                                                 List *tlist);
- static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc,
-                                                  List *tlist,
-                                                  int numSortCols, AttrNumber *sortColIdx,
-                                                  int *partNumCols,
-                                                  AttrNumber **partColIdx,
-                                                  Oid **partOperators,
-                                                  int *ordNumCols,
-                                                  AttrNumber **ordColIdx,
-                                                  Oid **ordOperators);
 +#ifdef XCP
 +static Plan *grouping_distribution(PlannerInfo *root, Plan *plan,
 +                                        int numGroupCols, AttrNumber *groupColIdx,
 +                                        List *current_pathkeys, Distribution **distribution);
 +static bool equal_distributions(PlannerInfo *root, Distribution *dst1,
 +                                      Distribution *dst2);
 +#endif
- static Plan *build_grouping_chain(PlannerInfo *root,
-                                        Query *parse,
-                                        List *tlist,
-                                        bool need_sort_for_grouping,
-                                        List *rollup_groupclauses,
-                                        List *rollup_lists,
-                                        AttrNumber *groupColIdx,
-                                        AggClauseCosts *agg_costs,
-                                        long numGroups,
-                                        Plan *result_plan);
+ static PathTarget *make_sort_input_target(PlannerInfo *root,
+                                          PathTarget *final_target,
+                                          bool *have_postponed_srfs);
  
  /*****************************************************************************
   *
@@@ -254,16 -289,14 +317,22 @@@ standard_planner(Query *parse, int curs
        }
  
        /* primary planning entry point (may recurse for subqueries) */
-       top_plan = subquery_planner(glob, parse, NULL,
-                                                               false, tuple_fraction, &root);
+       root = subquery_planner(glob, parse, NULL,
+                                                       false, tuple_fraction);
+       /* Select best Path and turn it into a Plan */
+       final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
+       best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
+       top_plan = create_plan(root, best_path);
 +#ifdef XCP
 +      if (root->distribution)
 +      {
 +              top_plan = (Plan *) make_remotesubplan(root, top_plan, NULL,
 +                                                                                         root->distribution,
 +                                                                                         root->query_pathkeys);
 +      }
 +#endif
  
        /*
         * If creating a plan for a scrollable cursor, make sure it can run
        result->rowMarks = glob->finalrowmarks;
        result->relationOids = glob->relationOids;
        result->invalItems = glob->invalItems;
 +#ifdef XCP
 +      result->distributionType = LOCATOR_TYPE_NONE;
 +      result->distributionKey = InvalidAttrNumber;
 +      result->distributionNodes = NULL;
 +#endif
        result->nParamExec = glob->nParamExec;
-       result->hasRowSecurity = glob->hasRowSecurity;
  
        return result;
  }
@@@ -358,9 -458,8 +499,9 @@@ subquery_planner(PlannerGlobal *glob, Q
        List       *newWithCheckOptions;
        List       *newHaving;
        bool            hasOuterJoins;
+       RelOptInfo *final_rel;
        ListCell   *l;
 +      bool recursiveOk = true;
  
        /* Create a PlannerInfo data structure for this subquery */
        root = makeNode(PlannerInfo);
        root->eq_classes = NIL;
        root->append_rel_list = NIL;
        root->rowMarks = NIL;
-       root->hasInheritedTarget = false;
+       memset(root->upper_rels, 0, sizeof(root->upper_rels));
+       memset(root->upper_targets, 0, sizeof(root->upper_targets));
+       root->processed_tlist = NIL;
        root->grouping_map = NULL;
 +      root->recursiveOk = true;
 +
+       root->minmax_aggs = NIL;
+       root->hasInheritedTarget = false;
        root->hasRecursion = hasRecursion;
        if (hasRecursion)
                root->wt_param_id = SS_assign_special_param(root);
         */
        if (parse->resultRelation &&
                rt_fetch(parse->resultRelation, parse->rtable)->inh)
-               plan = inheritance_planner(root);
+               inheritance_planner(root);
        else
-       {
-               plan = grouping_planner(root, tuple_fraction);
-               /* If it's not SELECT, we need a ModifyTable node */
-               if (parse->commandType != CMD_SELECT)
-               {
-                       List       *withCheckOptionLists;
-                       List       *returningLists;
-                       List       *rowMarks;
-                       /*
-                        * Set up the WITH CHECK OPTION and RETURNING lists-of-lists, if
-                        * needed.
-                        */
-                       if (parse->withCheckOptions)
-                               withCheckOptionLists = list_make1(parse->withCheckOptions);
-                       else
-                               withCheckOptionLists = NIL;
-                       if (parse->returningList)
-                               returningLists = list_make1(parse->returningList);
-                       else
-                               returningLists = NIL;
-                       /*
-                        * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
-                        * will have dealt with fetching non-locked marked rows, else we
-                        * need to have ModifyTable do that.
-                        */
-                       if (parse->rowMarks)
-                               rowMarks = NIL;
-                       else
-                               rowMarks = root->rowMarks;
+               grouping_planner(root, false, tuple_fraction);
 -
+       /*
+        * Capture the set of outer-level param IDs we have access to, for use in
+        * extParam/allParam calculations later.
+        */
+       SS_identify_outer_params(root);
  
-                       if (root->query_level > 1)
-                               ereport(ERROR,
-                                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                errmsg("INSERT/UPDATE/DELETE is not supported in subquery")));
 +#ifdef XCP
-                       plan = (Plan *) make_modifytable(root,
-                                                                                        parse->commandType,
-                                                                                        parse->canSetTag,
-                                                                                        parse->resultRelation,
-                                                                          list_make1_int(parse->resultRelation),
-                                                                                        list_make1(plan),
-                                                                                        withCheckOptionLists,
-                                                                                        returningLists,
-                                                                                        rowMarks,
-                                                                                        parse->onConflict,
-                                                                                        SS_assign_special_param(root));
-               }
-       }
++      if (root->query_level > 1)
++              ereport(ERROR,
++                              (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
++                               errmsg("INSERT/UPDATE/DELETE is not supported in subquery")));
 +#endif
 +
++      plan = (Plan *) make_modifytable(root,
++                      parse->commandType,
++                      parse->canSetTag,
++                      parse->resultRelation,
++                      list_make1_int(parse->resultRelation),
++                      list_make1(plan),
++                      withCheckOptionLists,
++                      returningLists,
++                      rowMarks,
++                      parse->onConflict,
++                      SS_assign_special_param(root));
 +
        /*
-        * If any subplans were generated, or if there are any parameters to worry
-        * about, build initPlan list and extParam/allParam sets for plan nodes,
-        * and attach the initPlans to the top plan node.
+        * If any initPlans were created in this query level, increment the
+        * surviving Paths' costs to account for them.  They won't actually get
+        * attached to the plan tree till create_plan() runs, but we want to be
+        * sure their costs are included now.
         */
-       if (list_length(glob->subplans) != num_old_subplans ||
-               root->glob->nParamExec > 0)
-               SS_finalize_plan(root, plan, true);
+       final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
+       SS_charge_for_initplans(root, final_rel);
  
-       /* Return internal info if caller wants it */
-       if (subroot)
-               *subroot = root;
+       /*
+        * Make sure we've identified the cheapest Path for the final rel.  (By
+        * doing this here not in grouping_planner, we include initPlan costs in
+        * the decision, though it's unlikely that will change anything.)
+        */
+       set_cheapest(final_rel);
  
-       return plan;
 +      /*
 +       * XCPTODO
 +       * Temporarily block WITH RECURSIVE in most cases until we can fix it.
 +       * Allow it for pg_catalog tables and replicated tables.
 +       */
 +      {
 +              int idx;
 +              recursiveOk = true;
 +
 +              /* simple_rel_array entries are indexed from 1; entry 0 is unused */
 +              for (idx = 1; idx < root->simple_rel_array_size - 1 && recursiveOk; idx++)
 +              {
 +                      RangeTblEntry *rte;
 +
 +                      rte = planner_rt_fetch(idx, root);
 +                      if (!rte)
 +                              continue;
 +
 +                      switch (rte->rtekind)
 +                      {
 +                              case RTE_JOIN:
 +                              case RTE_VALUES:
 +                              case RTE_CTE:
 +                                      continue;
 +                              case RTE_RELATION:
 +                                      {
 +                                              char loc_type;
 +
 +                                              loc_type = GetRelationLocType(rte->relid);
 +
 +                                              /* skip pg_catalog */
 +                                              if (loc_type == LOCATOR_TYPE_NONE)
 +                                                      continue;
 +
 +                                              /* If replicated, allow */
 +                                              if (IsLocatorReplicated(loc_type))
 +                                                      continue;
 +                                              else
 +                                                      recursiveOk = false;
 +                                              break;
 +                                      }
 +                              case RTE_SUBQUERY:
 +                                      {
 +                                              RelOptInfo *relOptInfo = root->simple_rel_array[idx];
 +                                              if (relOptInfo && relOptInfo->subroot &&
 +                                                              !relOptInfo->subroot->recursiveOk)
 +                                                      recursiveOk = false;
 +                                              break;
 +                                      }
 +                              default:
 +                                      recursiveOk = false;
 +                                      break;
 +                      }
 +              }
 +      }
 +
 +      if (root->recursiveOk)
 +              root->recursiveOk = recursiveOk;
 +
 +      if (root->hasRecursion && !root->recursiveOk)
 +              elog(ERROR, "WITH RECURSIVE is currently not supported on distributed tables");
 +
+       return root;
  }
  
  /*
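The XCPTODO block in subquery_planner() above walks the range table to decide whether WITH RECURSIVE can be allowed for this query. A minimal sketch of the per-RTE test behind it; is_recursive_safe_rte is a hypothetical name, GetRelationLocType()/IsLocatorReplicated()/LOCATOR_TYPE_NONE are used only as they appear in the diff, and the XL headers that declare them are omitted:

#include "postgres.h"
#include "nodes/parsenodes.h"

/*
 * Illustrative only.  Joins, VALUES lists and CTE references are harmless;
 * catalog relations (no locator) and replicated relations are allowed;
 * everything else keeps WITH RECURSIVE blocked.  RTE_SUBQUERY is handled
 * by the caller, which consults the subquery's own recursiveOk flag.
 */
static bool
is_recursive_safe_rte(RangeTblEntry *rte)
{
	switch (rte->rtekind)
	{
		case RTE_JOIN:
		case RTE_VALUES:
		case RTE_CTE:
			return true;
		case RTE_RELATION:
			{
				char		loc_type = GetRelationLocType(rte->relid);

				return loc_type == LOCATOR_TYPE_NONE ||
					IsLocatorReplicated(loc_type);
			}
		default:
			return false;
	}
}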
@@@ -1200,44 -1232,9 +1356,44 @@@ inheritance_planner(PlannerInfo *root
                 * If this child rel was excluded by constraint exclusion, exclude it
                 * from the result plan.
                 */
-               if (is_dummy_plan(subplan))
+               if (IS_DUMMY_PATH(subpath))
                        continue;
  
-               subplans = lappend(subplans, subplan);
 +#ifdef XCP
 +              /*
 +               * All subplans should have the same distribution, except possibly for
 +               * the node restriction.  At the moment this is always the case, but if
 +               * that changes we will have to handle inheritance differently.
 +               * Effectively we want to push the ModifyTable down to the data nodes
 +               * when it runs against distributed inherited tables.  To achieve this
 +               * we build up the distribution of the query from the distributions of
 +               * the subplans.
 +               * If subplans are restricted to different nodes we union these
 +               * restrictions; if at least one subplan is not restricted we do not
 +               * restrict the parent plan either.
 +               * After the plan is returned from this function, a valid
 +               * root->distribution value forces a proper RemoteSubplan node on top
 +               * of it.
 +               */
 +              if (root->distribution == NULL)
 +                      root->distribution = subroot.distribution;
 +              else if (!bms_is_empty(root->distribution->restrictNodes))
 +              {
 +                      if (bms_is_empty(subroot.distribution->restrictNodes))
 +                      {
 +                              bms_free(root->distribution->restrictNodes);
 +                              root->distribution->restrictNodes = NULL;
 +                      }
 +                      else
 +                      {
 +                              root->distribution->restrictNodes = bms_join(
 +                                              root->distribution->restrictNodes,
 +                                              subroot.distribution->restrictNodes);
 +                              subroot.distribution->restrictNodes = NULL;
 +                      }
 +              }
 +#endif
++              subroots = lappend(subroots, subroot);
 +
                /*
                 * If this is the first non-excluded child, its post-planning rtable
                 * becomes the initial contents of final_rtable; otherwise, append
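The restrictNodes handling above implements an "unrestricted wins" union: if any child subplan can run on every node, the parent must not be restricted either; otherwise the children's node sets are merged. A minimal sketch of that rule in isolation; merge_restrict_nodes is a hypothetical name, Distribution is taken as declared in the XL sources (header omitted), and the bitmapset calls are the standard PostgreSQL API:

#include "postgres.h"
#include "nodes/bitmapset.h"

/*
 * Illustrative only: fold a child's node restriction into the parent's.
 * An empty (NULL) restrictNodes set means "no restriction" and always
 * wins; otherwise the two sets are unioned.  bms_join() recycles both
 * inputs, matching the usage in inheritance_planner() above.
 */
static void
merge_restrict_nodes(Distribution *parent, Distribution *child)
{
	if (bms_is_empty(parent->restrictNodes))
		return;					/* parent already unrestricted */

	if (bms_is_empty(child->restrictNodes))
	{
		/* child is unrestricted, so the parent must be too */
		bms_free(parent->restrictNodes);
		parent->restrictNodes = NULL;
	}
	else
	{
		parent->restrictNodes = bms_join(parent->restrictNodes,
										 child->restrictNodes);
		child->restrictNodes = NULL;
	}
}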
@@@ -1395,14 -1420,12 +1579,15 @@@ grouping_planner(PlannerInfo *root, boo
        int64           offset_est = 0;
        int64           count_est = 0;
        double          limit_tuples = -1.0;
-       Plan       *result_plan;
-       List       *current_pathkeys;
-       double          dNumGroups = 0;
-       bool            use_hashed_distinct = false;
-       bool            tested_hashed_distinct = false;
 +#ifdef XCP
 +      Distribution *distribution = NULL; /* distribution of the result_plan */
 +#endif
+       bool            have_postponed_srfs = false;
+       double          tlist_rows;
+       PathTarget *final_target;
+       RelOptInfo *current_rel;
+       RelOptInfo *final_rel;
+       ListCell   *lc;
  
        /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
        if (parse->limitCount || parse->limitOffset)
                }
  
                /*
-                * Select the best path.  If we are doing hashed grouping, we will
-                * always read all the input tuples, so use the cheapest-total path.
-                * Otherwise, the comparison above is correct.
+                * Save the various upper-rel PathTargets we just computed into
+                * root->upper_targets[].  The core code doesn't use this, but it
+                * provides a convenient place for extensions to get at the info.  For
+                * consistency, we save all the intermediate targets, even though some
+                * of the corresponding upperrels might not be needed for this query.
                 */
-               if (use_hashed_grouping || use_hashed_distinct || !sorted_path)
-                       best_path = cheapest_path;
-               else
-                       best_path = sorted_path;
+               root->upper_targets[UPPERREL_FINAL] = final_target;
+               root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
+               root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
  
                /*
-                * Check to see if it's possible to optimize MIN/MAX aggregates. If
-                * so, we will forget all the work we did so far to choose a "regular"
-                * path ... but we had to do it anyway to be able to tell which way is
-                * cheaper.
+                * If we have grouping and/or aggregation, consider ways to implement
+                * that.  We build a new upperrel representing the output of this
+                * phase.
                 */
-               result_plan = optimize_minmax_aggregates(root,
-                                                                                                tlist,
-                                                                                                &agg_costs,
-                                                                                                best_path);
-               if (result_plan != NULL)
+               if (have_grouping)
                {
-                       /*
-                        * optimize_minmax_aggregates generated the full plan, with the
-                        * right tlist, and it has no sort order.
-                        */
-                       current_pathkeys = NIL;
+                       current_rel = create_grouping_paths(root,
+                                                                                               current_rel,
+                                                                                               grouping_target,
+                                                                                               &agg_costs,
+                                                                                               rollup_lists,
+                                                                                               rollup_groupclauses);
                }
-               else
+               /*
+                * If we have window functions, consider ways to implement those.  We
+                * build a new upperrel representing the output of this phase.
+                */
+               if (activeWindows)
                {
-                       /*
-                        * Normal case --- create a plan according to query_planner's
-                        * results.
-                        */
-                       bool            need_sort_for_grouping = false;
+                       current_rel = create_window_paths(root,
+                                                                                         current_rel,
+                                                                                         grouping_target,
+                                                                                         sort_input_target,
+                                                                                         tlist,
+                                                                                         wflists,
+                                                                                         activeWindows);
+               }
  
-                       result_plan = create_plan(root, best_path);
-                       current_pathkeys = best_path->pathkeys;
 +#ifdef XCP
-                       distribution = best_path->distribution;
++              distribution = best_path->distribution;
 +#endif
+               /*
+                * If there is a DISTINCT clause, consider ways to implement that. We
+                * build a new upperrel representing the output of this phase.
+                */
+               if (parse->distinctClause)
+               {
+                       current_rel = create_distinct_paths(root,
+                                                                                               current_rel);
+               }
  
-                       /* Detect if we'll need an explicit sort for grouping */
-                       if (parse->groupClause && !use_hashed_grouping &&
-                         !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
-                       {
-                               need_sort_for_grouping = true;
+       }                                                       /* end of if (setOperations) */
  
++<<<<<<< HEAD
 +                              /*
 +                               * Always override create_plan's tlist, so that we don't sort
 +                               * useless data from a "physical" tlist.
 +                               */
 +                              need_tlist_eval = true;
 +                      }
 +
 +                      /*
 +                       * create_plan returns a plan with just a "flat" tlist of required
 +                       * Vars.  Usually we need to insert the sub_tlist as the tlist of
 +                       * the top plan node.  However, we can skip that if we determined
 +                       * that whatever create_plan chose to return will be good enough.
 +                       */
 +                      if (need_tlist_eval)
 +                      {
 +                              /*
 +                               * If the top-level plan node is one that cannot do expression
 +                               * evaluation and its existing target list isn't already what
 +                               * we need, we must insert a Result node to project the
 +                               * desired tlist.
 +                               */
 +                              if (!is_projection_capable_plan(result_plan) &&
 +                                      !tlist_same_exprs(sub_tlist, result_plan->targetlist))
 +                              {
 +                                      result_plan = (Plan *) make_result(root,
 +                                                                                                         sub_tlist,
 +                                                                                                         NULL,
 +                                                                                                         result_plan);
 +                              }
 +                              else
 +                              {
 +                                      /*
 +                                       * Otherwise, just replace the subplan's flat tlist with
 +                                       * the desired tlist.
 +                                       */
 +                                      result_plan->targetlist = sub_tlist;
 +                              }
 +#ifdef XCP
 +                              /*
 +                               * RemoteSubplan is conditionally projection-capable: it pushes
 +                               * the projection down to the data nodes.
 +                               */
 +                              if (IsA(result_plan, RemoteSubplan))
 +                                      result_plan->lefttree->targetlist = sub_tlist;
 +#endif
 +
 +                              /*
 +                               * Also, account for the cost of evaluation of the sub_tlist.
 +                               * See comments for add_tlist_costs_to_plan() for more info.
 +                               */
 +                              add_tlist_costs_to_plan(root, result_plan, sub_tlist);
 +                      }
 +                      else
 +                      {
 +                              /*
 +                               * Since we're using create_plan's tlist and not the one
 +                               * make_subplanTargetList calculated, we have to refigure any
 +                               * grouping-column indexes make_subplanTargetList computed.
 +                               */
 +                              locate_grouping_columns(root, tlist, result_plan->targetlist,
 +                                                                              groupColIdx);
 +                      }
 +
 +                      /*
 +                       * groupColIdx is now cast in stone, so record a mapping from
 +                       * tleSortGroupRef to column index. setrefs.c needs this to
 +                       * finalize GROUPING() operations.
 +                       */
 +
 +                      if (parse->groupingSets)
 +                      {
 +                              AttrNumber *grouping_map = palloc0(sizeof(AttrNumber) * (maxref + 1));
 +                              ListCell   *lc;
 +                              int                     i = 0;
 +
 +                              foreach(lc, parse->groupClause)
 +                              {
 +                                      SortGroupClause *gc = lfirst(lc);
 +
 +                                      grouping_map[gc->tleSortGroupRef] = groupColIdx[i++];
 +                              }
 +
 +                              root->grouping_map = grouping_map;
 +                      }
 +
 +                      /*
 +                       * Insert AGG or GROUP node if needed, plus an explicit sort step
 +                       * if necessary.
 +                       *
 +                       * HAVING clause, if any, becomes qual of the Agg or Group node.
 +                       */
 +                      if (use_hashed_grouping)
 +                      {
 +#ifdef XCP
 +                              result_plan = grouping_distribution(root, result_plan,
 +                                                                                                      numGroupCols, groupColIdx,
 +                                                                                                      current_pathkeys,
 +                                                                                                      &distribution);
 +#endif
 +                              /* Hashed aggregate plan --- no sort needed */
 +                              result_plan = (Plan *) make_agg(root,
 +                                                                                              tlist,
 +                                                                                              (List *) parse->havingQual,
 +                                                                                              AGG_HASHED,
 +                                                                                              &agg_costs,
 +                                                                                              numGroupCols,
 +                                                                                              groupColIdx,
 +                                                                      extract_grouping_ops(parse->groupClause),
 +                                                                                              NIL,
 +                                                                                              numGroups,
 +                                                                                              result_plan);
 +                              /* Hashed aggregation produces randomly-ordered results */
 +                              current_pathkeys = NIL;
 +                      }
 +                      else if (parse->hasAggs || (parse->groupingSets && parse->groupClause))
 +                      {
 +                              /*
 +                               * Output is in sorted order by group_pathkeys if, and only
 +                               * if, there is a single rollup operation on a non-empty list
 +                               * of grouping expressions.
 +                               */
 +                              if (list_length(rollup_groupclauses) == 1
 +                                      && list_length(linitial(rollup_groupclauses)) > 0)
 +                                      current_pathkeys = root->group_pathkeys;
 +                              else
 +                                      current_pathkeys = NIL;
 +
 +#ifdef XCP
 +                              result_plan = grouping_distribution(root, result_plan,
 +                                                                                                      numGroupCols, groupColIdx,
 +                                                                                                      current_pathkeys,
 +                                                                                                      &distribution);
 +#endif
 +                              result_plan = build_grouping_chain(root,
 +                                                                                                 parse,
 +                                                                                                 tlist,
 +                                                                                                 need_sort_for_grouping,
 +                                                                                                 rollup_groupclauses,
 +                                                                                                 rollup_lists,
 +                                                                                                 groupColIdx,
 +                                                                                                 &agg_costs,
 +                                                                                                 numGroups,
 +                                                                                                 result_plan);
 +
 +                              /*
 +                               * these are destroyed by build_grouping_chain, so make sure
 +                               * we don't try and touch them again
 +                               */
 +                              rollup_groupclauses = NIL;
 +                              rollup_lists = NIL;
 +                      }
 +                      else if (parse->groupClause)
 +                      {
 +                              /*
 +                               * GROUP BY without aggregation, so insert a group node (plus
 +                               * the appropriate sort node, if necessary).
 +                               *
 +                               * Add an explicit sort if we couldn't make the path come out
 +                               * the way the GROUP node needs it.
 +                               */
 +                              if (need_sort_for_grouping)
 +                              {
 +                                      result_plan = (Plan *)
 +                                              make_sort_from_groupcols(root,
 +                                                                                               parse->groupClause,
 +                                                                                               groupColIdx,
 +                                                                                               result_plan);
 +                                      current_pathkeys = root->group_pathkeys;
 +                              }
 +
 +#ifdef XCP
 +                              result_plan = grouping_distribution(root, result_plan,
 +                                                                                                      numGroupCols, groupColIdx,
 +                                                                                                      current_pathkeys,
 +                                                                                                      &distribution);
 +#endif
 +                              result_plan = (Plan *) make_group(root,
 +                                                                                                tlist,
 +                                                                                                (List *) parse->havingQual,
 +                                                                                                numGroupCols,
 +                                                                                                groupColIdx,
 +                                                                      extract_grouping_ops(parse->groupClause),
 +                                                                                                dNumGroups,
 +                                                                                                result_plan);
 +                      }
 +                      else if (root->hasHavingQual || parse->groupingSets)
 +                      {
 +                              int                     nrows = list_length(parse->groupingSets);
 +
 +                              /*
 +                               * No aggregates, and no GROUP BY, but we have a HAVING qual
 +                               * or grouping sets (which by elimination of cases above must
 +                               * consist solely of empty grouping sets, since otherwise
 +                               * groupClause will be non-empty).
 +                               *
 +                               * This is a degenerate case in which we are supposed to emit
 +                               * either 0 or 1 row for each grouping set depending on
 +                               * whether HAVING succeeds.  Furthermore, there cannot be any
 +                               * variables in either HAVING or the targetlist, so we
 +                               * actually do not need the FROM table at all!  We can just
 +                               * throw away the plan-so-far and generate a Result node. This
 +                               * is a sufficiently unusual corner case that it's not worth
 +                               * contorting the structure of this routine to avoid having to
 +                               * generate the plan in the first place.
 +                               */
 +#ifdef XCP
 +                              result_plan = grouping_distribution(root, result_plan, 0, NULL,
 +                                                                                                      current_pathkeys,
 +                                                                                                      &distribution);
 +#endif
 +                              result_plan = (Plan *) make_result(root,
 +                                                                                                 tlist,
 +                                                                                                 parse->havingQual,
 +                                                                                                 NULL);
 +
 +                              /*
 +                               * Doesn't seem worthwhile writing code to cons up a
 +                               * generate_series or a values scan to emit multiple rows.
 +                               * Instead just clone the result in an Append.
 +                               */
 +                              if (nrows > 1)
 +                              {
 +                                      List       *plans = list_make1(result_plan);
 +
 +                                      while (--nrows > 0)
 +                                              plans = lappend(plans, copyObject(result_plan));
 +
 +                                      result_plan = (Plan *) make_append(plans, tlist);
 +                              }
 +                      }
 +              }                                               /* end of non-minmax-aggregate case */
++=======
+       /*
+        * If ORDER BY was given, consider ways to implement that, and generate a
+        * new upperrel containing only paths that emit the correct ordering and
+        * project the correct final_target.  We can apply the original
+        * limit_tuples limit in sort costing here, but only if there are no
+        * postponed SRFs.
+        */
+       if (parse->sortClause)
+       {
+               current_rel = create_ordered_paths(root,
+                                                                                  current_rel,
+                                                                                  final_target,
+                                                                                  have_postponed_srfs ? -1.0 :
+                                                                                  limit_tuples);
+       }
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
  
-               /*
-                * Since each window function could require a different sort order, we
-                * stack up a WindowAgg node for each window, with sort steps between
-                * them as needed.
-                */
-               if (activeWindows)
+       /*
+        * If there are set-returning functions in the tlist, scale up the output
+        * rowcounts of all surviving Paths to account for that.  Note that if any
+        * SRFs appear in sorting or grouping columns, we'll have underestimated
+        * the numbers of rows passing through earlier steps; but that's such a
+        * weird usage that it doesn't seem worth greatly complicating matters to
+        * account for it.
+        */
+       tlist_rows = tlist_returns_set_rows(tlist);
+       if (tlist_rows > 1)
+       {
+               foreach(lc, current_rel->pathlist)
                {
-                       List       *window_tlist;
-                       ListCell   *l;
-                       /*
-                        * If the top-level plan node is one that cannot do expression
-                        * evaluation, we must insert a Result node to project the desired
-                        * tlist.  (In some cases this might not really be required, but
-                        * it's not worth trying to avoid it.  In particular, think not to
-                        * skip adding the Result if the initial window_tlist matches the
-                        * top-level plan node's output, because we might change the tlist
-                        * inside the following loop.)  Note that on second and subsequent
-                        * passes through the following loop, the top-level node will be a
-                        * WindowAgg which we know can project; so we only need to check
-                        * once.
-                        */
-                       if (!is_projection_capable_plan(result_plan))
-                       {
-                               result_plan = (Plan *) make_result(root,
-                                                                                                  NIL,
-                                                                                                  NULL,
-                                                                                                  result_plan);
-                       }
+                       Path       *path = (Path *) lfirst(lc);
  
                        /*
-                        * The "base" targetlist for all steps of the windowing process is
-                        * a flat tlist of all Vars and Aggs needed in the result.  (In
-                        * some cases we wouldn't need to propagate all of these all the
-                        * way to the top, since they might only be needed as inputs to
-                        * WindowFuncs.  It's probably not worth trying to optimize that
-                        * though.)  We also add window partitioning and sorting
-                        * expressions to the base tlist, to ensure they're computed only
-                        * once at the bottom of the stack (that's critical for volatile
-                        * functions).  As we climb up the stack, we'll add outputs for
-                        * the WindowFuncs computed at each level.
+                        * We assume that execution costs of the tlist as such were
+                        * already accounted for.  However, it still seems appropriate to
+                        * charge something more for the executor's general costs of
+                        * processing the added tuples.  The cost is probably less than
+                        * cpu_tuple_cost, though, so we arbitrarily use half of that.
                         */
-                       window_tlist = make_windowInputTargetList(root,
-                                                                                                         tlist,
-                                                                                                         activeWindows);
+                       path->total_cost += path->rows * (tlist_rows - 1) *
+                               cpu_tuple_cost / 2;
  
++<<<<<<< HEAD
 +                      /*
 +                       * The copyObject steps here are needed to ensure that each plan
 +                       * node has a separately modifiable tlist.  (XXX wouldn't a
 +                       * shallow list copy do for that?)
 +                       */
 +                      result_plan->targetlist = (List *) copyObject(window_tlist);
 +#ifdef XCP
 +                      /*
 +                       * We cannot guarantee a correct result of a windowing function
 +                       * if aggregation is pushed down to the Datanodes.  So if the
 +                       * current plan produces a distributed result set, we should
 +                       * bring it to the coordinator.
 +                       */
 +                      if (distribution)
 +                      {
 +                              result_plan = (Plan *)
 +                                              make_remotesubplan(root, result_plan, NULL,
 +                                                                                 distribution, current_pathkeys);
 +                              distribution = NULL;
 +                      }
 +#endif
 +
 +                      foreach(l, activeWindows)
 +                      {
 +                              WindowClause *wc = (WindowClause *) lfirst(l);
 +                              List       *window_pathkeys;
 +                              int                     partNumCols;
 +                              AttrNumber *partColIdx;
 +                              Oid                *partOperators;
 +                              int                     ordNumCols;
 +                              AttrNumber *ordColIdx;
 +                              Oid                *ordOperators;
 +
 +                              window_pathkeys = make_pathkeys_for_window(root,
 +                                                                                                                 wc,
 +                                                                                                                 tlist);
 +
 +                              /*
 +                               * This is a bit tricky: we build a sort node even if we don't
 +                               * really have to sort.  Even when no explicit sort is needed,
 +                               * we need to have suitable resjunk items added to the input
 +                               * plan's tlist for any partitioning or ordering columns that
 +                               * aren't plain Vars.  (In theory, make_windowInputTargetList
 +                               * should have provided all such columns, but let's not assume
 +                               * that here.)  Furthermore, this way we can use existing
 +                               * infrastructure to identify which input columns are the
 +                               * interesting ones.
 +                               */
 +                              if (window_pathkeys)
 +                              {
 +                                      Sort       *sort_plan;
 +
 +                                      sort_plan = make_sort_from_pathkeys(root,
 +                                                                                                              result_plan,
 +                                                                                                              window_pathkeys,
 +                                                                                                              -1.0);
 +                                      if (!pathkeys_contained_in(window_pathkeys,
 +                                                                                         current_pathkeys))
 +                                      {
 +                                              /* we do indeed need to sort */
 +                                              result_plan = (Plan *) sort_plan;
 +                                              current_pathkeys = window_pathkeys;
 +                                      }
 +#ifdef XCP
 +                                      /*
 +                                       * In our code the Sort may be pushed down to the Datanodes,
 +                                       * so sort_plan may not actually be a Sort node.  In that
 +                                       * case we should take the sort columns from the top
 +                                       * RemoteSubplan.
 +                                       */
 +                                      if (!IsA(sort_plan, Sort))
 +                                      {
 +                                              RemoteSubplan *pushdown;
 +                                              pushdown = find_push_down_plan((Plan *)sort_plan, true);
 +                                              Assert(pushdown && pushdown->sort);
 +                                              get_column_info_for_window(root, wc, tlist,
 +                                                                                                 pushdown->sort->numCols,
 +                                                                                                 pushdown->sort->sortColIdx,
 +                                                                                                 &partNumCols,
 +                                                                                                 &partColIdx,
 +                                                                                                 &partOperators,
 +                                                                                                 &ordNumCols,
 +                                                                                                 &ordColIdx,
 +                                                                                                 &ordOperators);
 +                                      }
 +                                      else
 +#endif
 +                                      /* In either case, extract the per-column information */
 +                                      get_column_info_for_window(root, wc, tlist,
 +                                                                                         sort_plan->numCols,
 +                                                                                         sort_plan->sortColIdx,
 +                                                                                         &partNumCols,
 +                                                                                         &partColIdx,
 +                                                                                         &partOperators,
 +                                                                                         &ordNumCols,
 +                                                                                         &ordColIdx,
 +                                                                                         &ordOperators);
 +                              }
 +                              else
 +                              {
 +                                      /* empty window specification, nothing to sort */
 +                                      partNumCols = 0;
 +                                      partColIdx = NULL;
 +                                      partOperators = NULL;
 +                                      ordNumCols = 0;
 +                                      ordColIdx = NULL;
 +                                      ordOperators = NULL;
 +                              }
++=======
+                       path->rows *= tlist_rows;
+               }
+               /* No need to run set_cheapest; we're keeping all paths anyway. */
+       }
  
-                               if (lnext(l))
-                               {
-                                       /* Add the current WindowFuncs to the running tlist */
-                                       window_tlist = add_to_flat_tlist(window_tlist,
-                                                                                  wflists->windowFuncs[wc->winref]);
-                               }
-                               else
-                               {
-                                       /* Install the original tlist in the topmost WindowAgg */
-                                       window_tlist = tlist;
-                               }
+       /*
+        * Now we are prepared to build the final-output upperrel.
+        */
+       final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
  
-                               /* ... and make the WindowAgg plan node */
-                               result_plan = (Plan *)
-                                       make_windowagg(root,
-                                                                  (List *) copyObject(window_tlist),
-                                                                  wflists->windowFuncs[wc->winref],
-                                                                  wc->winref,
-                                                                  partNumCols,
-                                                                  partColIdx,
-                                                                  partOperators,
-                                                                  ordNumCols,
-                                                                  ordColIdx,
-                                                                  ordOperators,
-                                                                  wc->frameOptions,
-                                                                  wc->startOffset,
-                                                                  wc->endOffset,
-                                                                  result_plan);
-                       }
-               }
-       }                                                       /* end of if (setOperations) */
+       /*
+        * If the input rel is marked consider_parallel and there's nothing that's
+        * not parallel-safe in the LIMIT clause, then the final_rel can be marked
+        * consider_parallel as well.  Note that if the query has rowMarks or is
+        * not a SELECT, consider_parallel will be false for every relation in the
+        * query.
+        */
+       if (current_rel->consider_parallel &&
+               !has_parallel_hazard(parse->limitOffset, false) &&
+               !has_parallel_hazard(parse->limitCount, false))
+               final_rel->consider_parallel = true;
+       /*
+        * If the current_rel belongs to a single FDW, so does the final_rel.
+        */
+       final_rel->serverid = current_rel->serverid;
+       final_rel->userid = current_rel->userid;
+       final_rel->useridiscurrent = current_rel->useridiscurrent;
+       final_rel->fdwroutine = current_rel->fdwroutine;
  
        /*
-        * If there is a DISTINCT clause, add the necessary node(s).
+        * Generate paths for the final_rel.  Insert all surviving paths, with
+        * LockRows, Limit, and/or ModifyTable steps added if needed.
         */
-       if (parse->distinctClause)
+       foreach(lc, current_rel->pathlist)
        {
-               double          dNumDistinctRows;
-               long            numDistinctRows;
+               Path       *path = (Path *) lfirst(lc);
  
                /*
-                * If there was grouping or aggregation, use the current number of
-                * rows as the estimated number of DISTINCT rows (ie, assume the
-                * result was already mostly unique).  If not, use the number of
-                * distinct-groups calculated previously.
+                * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
+                * (Note: we intentionally test parse->rowMarks not root->rowMarks
+                * here.  If there are only non-locking rowmarks, they should be
+                * handled by the ModifyTable node instead.  However, root->rowMarks
+                * is what goes into the LockRows node.)
                 */
-               if (parse->groupClause || parse->groupingSets || root->hasHavingQual || parse->hasAggs)
-                       dNumDistinctRows = result_plan->plan_rows;
-               else
-                       dNumDistinctRows = dNumGroups;
-               /* Also convert to long int --- but 'ware overflow! */
-               numDistinctRows = (long) Min(dNumDistinctRows, (double) LONG_MAX);
-               /* Choose implementation method if we didn't already */
-               if (!tested_hashed_distinct)
+               if (parse->rowMarks)
                {
-                       /*
-                        * At this point, either hashed or sorted grouping will have to
-                        * work from result_plan, so we pass that as both "cheapest" and
-                        * "sorted".
-                        */
-                       use_hashed_distinct =
-                               choose_hashed_distinct(root,
-                                                                          tuple_fraction, limit_tuples,
-                                                                          result_plan->plan_rows,
-                                                                          result_plan->plan_width,
-                                                                          result_plan->startup_cost,
-                                                                          result_plan->total_cost,
-                                                                          result_plan->startup_cost,
-                                                                          result_plan->total_cost,
-                                                                          current_pathkeys,
-                                                                          dNumDistinctRows);
+                       path = (Path *) create_lockrows_path(root, final_rel, path,
+                                                                                                root->rowMarks,
+                                                                                         SS_assign_special_param(root));
                }
  
-               if (use_hashed_distinct)
+               /*
+                * If there is a LIMIT/OFFSET clause, add the LIMIT node.
+                */
+               if (limit_needed(parse))
                {
++<<<<<<< HEAD
 +#ifdef XCP
 +                      result_plan = grouping_distribution(root, result_plan,
 +                                                                                      list_length(parse->distinctClause),
 +                                                                 extract_grouping_cols(parse->distinctClause,
 +                                                                                                        result_plan->targetlist),
 +                                                                                              current_pathkeys,
 +                                                                                              &distribution);
 +#endif
 +                      /* Hashed aggregate plan --- no sort needed */
 +                      result_plan = (Plan *) make_agg(root,
 +                                                                                      result_plan->targetlist,
 +                                                                                      NIL,
 +                                                                                      AGG_HASHED,
 +                                                                                      NULL,
 +                                                                                list_length(parse->distinctClause),
 +                                                               extract_grouping_cols(parse->distinctClause,
 +                                                                                                      result_plan->targetlist),
 +                                                               extract_grouping_ops(parse->distinctClause),
 +                                                                                      NIL,
 +                                                                                      numDistinctRows,
 +                                                                                      result_plan);
 +                      /* Hashed aggregation produces randomly-ordered results */
 +                      current_pathkeys = NIL;
++=======
+                       path = (Path *) create_limit_path(root, final_rel, path,
+                                                                                         parse->limitOffset,
+                                                                                         parse->limitCount,
+                                                                                         offset_est, count_est);
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
                }
-               else
+               /*
+                * If this is an INSERT/UPDATE/DELETE, and we're not being called from
+                * inheritance_planner, add the ModifyTable node.
+                */
+               if (parse->commandType != CMD_SELECT && !inheritance_update)
                {
+                       List       *withCheckOptionLists;
+                       List       *returningLists;
+                       List       *rowMarks;
                        /*
-                        * Use a Unique node to implement DISTINCT.  Add an explicit sort
-                        * if we couldn't make the path come out the way the Unique node
-                        * needs it.  If we do have to sort, always sort by the more
-                        * rigorous of DISTINCT and ORDER BY, to avoid a second sort
-                        * below.  However, for regular DISTINCT, don't sort now if we
-                        * don't have to --- sorting afterwards will likely be cheaper,
-                        * and also has the possibility of optimizing via LIMIT.  But for
-                        * DISTINCT ON, we *must* force the final sort now, else it won't
-                        * have the desired behavior.
+                        * Set up the WITH CHECK OPTION and RETURNING lists-of-lists, if
+                        * needed.
                         */
-                       List       *needed_pathkeys;
-                       if (parse->hasDistinctOn &&
-                               list_length(root->distinct_pathkeys) <
-                               list_length(root->sort_pathkeys))
-                               needed_pathkeys = root->sort_pathkeys;
+                       if (parse->withCheckOptions)
+                               withCheckOptionLists = list_make1(parse->withCheckOptions);
                        else
-                               needed_pathkeys = root->distinct_pathkeys;
-                       if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
-                       {
-                               if (list_length(root->distinct_pathkeys) >=
-                                       list_length(root->sort_pathkeys))
-                                       current_pathkeys = root->distinct_pathkeys;
-                               else
-                               {
-                                       current_pathkeys = root->sort_pathkeys;
-                                       /* Assert checks that parser didn't mess up... */
-                                       Assert(pathkeys_contained_in(root->distinct_pathkeys,
-                                                                                                current_pathkeys));
-                               }
+                               withCheckOptionLists = NIL;
  
-                               result_plan = (Plan *) make_sort_from_pathkeys(root,
-                                                                                                                          result_plan,
-                                                                                                                       current_pathkeys,
-                                                                                                                          -1.0);
-                       }
+                       if (parse->returningList)
+                               returningLists = list_make1(parse->returningList);
+                       else
+                               returningLists = NIL;
  
-       /*
-        * If ORDER BY was given and we were not able to make the plan come out in
-        * the right order, add an explicit sort step.
-        */
-       if (parse->sortClause)
-       {
-               if (!pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
-               {
-                       result_plan = (Plan *) make_sort_from_pathkeys(root,
-                                                                                                                  result_plan,
-                                                                                                                root->sort_pathkeys,
-                                                                                                                  limit_tuples);
-                       current_pathkeys = root->sort_pathkeys;
++<<<<<<< HEAD
 +#ifdef XCP
 +                      result_plan = grouping_distribution(root, result_plan,
 +                                                                                      list_length(parse->distinctClause),
 +                                                                 extract_grouping_cols(parse->distinctClause,
 +                                                                                                        result_plan->targetlist),
 +                                                                                              current_pathkeys,
 +                                                                                              &distribution);
 +#endif
 +                      result_plan = (Plan *) make_unique(result_plan,
 +                                                                                         parse->distinctClause);
 +                      result_plan->plan_rows = dNumDistinctRows;
 +                      /* The Unique node won't change sort ordering */
 +              }
 +      }
++=======
+                       /*
+                        * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
+                        * will have dealt with fetching non-locked marked rows, else we
+                        * need to have ModifyTable do that.
+                        */
+                       if (parse->rowMarks)
+                               rowMarks = NIL;
+                       else
+                               rowMarks = root->rowMarks;
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
+                       path = (Path *)
+                               create_modifytable_path(root, final_rel,
+                                                                               parse->commandType,
+                                                                               parse->canSetTag,
+                                                                               parse->resultRelation,
+                                                                               list_make1_int(parse->resultRelation),
+                                                                               list_make1(path),
+                                                                               list_make1(root),
+                                                                               withCheckOptionLists,
+                                                                               returningLists,
+                                                                               rowMarks,
+                                                                               parse->onConflict,
+                                                                               SS_assign_special_param(root));
                }
-       }
-       /*
-        * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
-        * (Note: we intentionally test parse->rowMarks not root->rowMarks here.
-        * If there are only non-locking rowmarks, they should be handled by the
-        * ModifyTable node instead.)
-        */
-       if (parse->rowMarks)
-       {
-               result_plan = (Plan *) make_lockrows(result_plan,
-                                                                                        root->rowMarks,
-                                                                                        SS_assign_special_param(root));
  
-               /*
-                * The result can no longer be assumed sorted, since locking might
-                * cause the sort key columns to be replaced with new values.
-                */
-               current_pathkeys = NIL;
+               /* And shove it into final_rel */
+               add_path(final_rel, path);
        }
  
        /*
-        * Finally, if there is a LIMIT/OFFSET clause, add the LIMIT node.
+        * If there is an FDW that's responsible for all baserels of the query,
+        * let it consider adding ForeignPaths.
         */
-       /*
-        * Return the actual output ordering in query_pathkeys for possible use by
-        * an outer query level.
-        */
-       root->query_pathkeys = current_pathkeys;
++<<<<<<< HEAD
 +      if (limit_needed(parse))
 +      {
 +#ifdef XCP
 +              /* We should put Limit on top of distributed results */
 +              if (distribution)
 +              {
 +                      result_plan = (Plan *)
 +                                      make_remotesubplan(root, result_plan, NULL,
 +                                                                         distribution, current_pathkeys);
 +                      distribution = NULL;
 +              }
 +#endif
 +              result_plan = (Plan *) make_limit(result_plan,
 +                                                                                parse->limitOffset,
 +                                                                                parse->limitCount,
 +                                                                                offset_est,
 +                                                                                count_est);
 +      }
++=======
+       if (final_rel->fdwroutine &&
+               final_rel->fdwroutine->GetForeignUpperPaths)
+               final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
+                                                                                                       current_rel, final_rel);
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
+       /* Let extensions possibly add some more paths */
+       if (create_upper_paths_hook)
+               (*create_upper_paths_hook) (root, UPPERREL_FINAL,
+                                                                       current_rel, final_rel);
++<<<<<<< HEAD
 +#ifdef XCP
 +      /*
 +       * Adjust query distribution if requested
 +       */
 +      if (root->distribution)
 +      {
 +              if (equal_distributions(root, root->distribution, distribution))
 +              {
 +                      if (IsLocatorReplicated(distribution->distributionType) &&
 +                                      contain_volatile_functions((Node *) result_plan->targetlist))
 +                              ereport(ERROR,
 +                                              (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
 +                                               errmsg("cannot update replicated table with result of volatile function")));
 +                      /*
 +                       * The source tuple will be consumed on the same node where it is
 +                       * produced, so if it is known that some node does not yield
 +                       * tuples, we do not want to send the subquery to those nodes for
 +                       * execution at all.  So copy the restriction to the external
 +                       * distribution.
 +                       * XXX Is that ever possible if an external restriction is already
 +                       * defined?  If yes, we should probably use the intersection of the
 +                       * sets, and if the resulting set is empty, create a dummy plan and
 +                       * set it as the result_plan.  Need to think this over.
 +                       */
 +                      root->distribution->restrictNodes =
 +                                      bms_copy(distribution->restrictNodes);
 +              }
 +              else
 +              {
 +                      RemoteSubplan *distributePlan;
 +                      /*
 +                       * If the planned statement is an UPDATE or DELETE, differing
 +                       * distributions here mean the ModifyTable node will be placed on
 +                       * top of a RemoteSubquery.  The UPDATE and DELETE versions of
 +                       * ModifyTable use the TID of the incoming tuple to apply the
 +                       * changes, but the RemoteSubquery node supplies RemoteTuples,
 +                       * which lack that field, so such a plan cannot be executed.
 +                       * The most common case is an UPDATE statement that modifies the
 +                       * distribution column.  An incorrect distributed plan is also
 +                       * possible when planning a complex UPDATE or DELETE statement
 +                       * involving a table join.
 +                       * We emit different error messages for the UPDATE and DELETE
 +                       * cases, mostly for compatibility with Postgres-XC.  It is hard to
 +                       * determine here whether such a plan results from updating the
 +                       * partitioning key or from a poorly planned join, so for UPDATE we
 +                       * assume the first (more probable) case, while for DELETE only the
 +                       * second case is possible.  The error message may therefore be
 +                       * misleading for an UPDATE involving a join, but we hope to address
 +                       * the distributed-update problem soon.  There are two ways to fix
 +                       * this:
 +                       * 1. Improve the distribution planner so that it never considers
 +                       * redistributing the target table.  If the planner then finds it
 +                       * has no choice, it would throw an error elsewhere, and here we
 +                       * would only be catching updates of distribution columns.
 +                       * 2. Modify the executor to allow distribution column updates.
 +                       * However, there are many issues behind the scenes in implementing
 +                       * that approach.
 +                       */
 +                      if (parse->commandType == CMD_UPDATE)
 +                              ereport(ERROR,
 +                                              (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
 +                                               errmsg("could not plan this distributed update"),
 +                                               errdetail("correlated UPDATE or updating the distribution column is currently not supported in Postgres-XL.")));
 +                      if (parse->commandType == CMD_DELETE)
 +                              ereport(ERROR,
 +                                              (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
 +                                               errmsg("could not plan this distributed delete"),
 +                                               errdetail("correlated or complex DELETE is currently not supported in Postgres-XL.")));
 +
 +                      /*
 +                       * Redistribute result according to requested distribution.
 +                       */
 +                      if ((distributePlan = find_push_down_plan(result_plan, true)))
 +                      {
 +                              Bitmapset  *tmpset;
 +                              int                     nodenum;
 +
 +                              distributePlan->distributionType = root->distribution->distributionType;
 +                              distributePlan->distributionKey = InvalidAttrNumber;
 +                              if (root->distribution->distributionExpr)
 +                              {
 +                                      ListCell   *lc;
 +
 +                                      /* Find distribution expression in the target list */
 +                                      foreach(lc, distributePlan->scan.plan.targetlist)
 +                                      {
 +                                              TargetEntry *tle = (TargetEntry *) lfirst(lc);
 +
 +                                              if (equal(tle->expr, root->distribution->distributionExpr))
 +                                              {
 +                                                      distributePlan->distributionKey = tle->resno;
 +                                                      break;
 +                                              }
 +                                      }
 +
 +                                      if (distributePlan->distributionKey == InvalidAttrNumber)
 +                                      {
 +                                              Plan       *lefttree = distributePlan->scan.plan.lefttree;
 +                                              Plan       *plan;
 +                                              TargetEntry *newtle;
 +
 +                                              /* The expression is not found, need to add junk */
 +                                              newtle = makeTargetEntry((Expr *) root->distribution->distributionExpr,
 +                                                                                               list_length(lefttree->targetlist) + 1,
 +                                                                                               NULL,
 +                                                                                               true);
 +
 +                                              if (is_projection_capable_plan(lefttree))
 +                                              {
 +                                                      /* Ok to modify subplan's target list */
 +                                                      lefttree->targetlist = lappend(lefttree->targetlist,
 +                                                                                                                 newtle);
 +                                              }
 +                                              else
 +                                              {
 +                                                      /* Use Result node to calculate expression */
 +                                                      List *newtlist = list_copy(lefttree->targetlist);
 +                                                      newtlist = lappend(newtlist, newtle);
 +                                                      lefttree = (Plan *) make_result(root, newtlist, NULL, lefttree);
 +                                                      distributePlan->scan.plan.lefttree = lefttree;
 +                                              }
 +                                              /* Update all the hierarchy */
 +                                              for (plan = result_plan; plan != lefttree; plan = plan->lefttree)
 +                                                      plan->targetlist = lefttree->targetlist;
 +                                      }
 +                              }
 +                              tmpset = bms_copy(root->distribution->nodes);
 +                              distributePlan->distributionNodes = NIL;
 +                              while ((nodenum = bms_first_member(tmpset)) >= 0)
 +                                      distributePlan->distributionNodes = lappend_int(
 +                                                      distributePlan->distributionNodes, nodenum);
 +                              bms_free(tmpset);
 +                      }
 +                      else if (!(IsA(result_plan, Result) && result_plan->lefttree ==
 +                                              NULL &&
 +                                              ((root->distribution->distributionType == 'H' &&
 +                                               bms_num_members(root->distribution->restrictNodes) == 1) ||
 +                                               (root->distribution->distributionType == 'R' &&
 +                                                !contain_mutable_functions((Node *)result_plan->targetlist)))))
 +                              result_plan = (Plan *) make_remotesubplan(root,
 +                                                                                                                result_plan,
 +                                                                                                                root->distribution,
 +                                                                                                                distribution,
 +                                                                                                                NULL);
 +              }
 +      }
 +      else
 +      {
 +              /*
 +               * Inform caller about distribution of the subplan
 +               */
 +              root->distribution = distribution;
 +      }
 +#endif
 +
 +      return result_plan;
++=======
+       /* Note: currently, we leave it to callers to do set_cheapest() */
++>>>>>>> b5bce6c1ec6061c8a4f730d927e162db7e2ce365
  }
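
The planner.c hunks above show the upstream side of this merge moving the tail of grouping_planner() from Plan construction to Path generation: each surviving path gets LockRows, Limit, and ModifyTable steps as needed, is added to the UPPERREL_FINAL rel, and create_upper_paths_hook is invoked at the end. As a rough illustration of how that hook is meant to be consumed, here is a minimal sketch of a hypothetical extension installing it; the hook and its four-argument 9.6 signature appear in the hunk above, the _PG_init() boilerplate is the usual module convention, and the example_* names are invented:

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

static create_upper_paths_hook_type prev_create_upper_paths_hook = NULL;

/* Runs once per upper planning stage; we only inspect the final rel. */
static void
example_upper_paths(PlannerInfo *root, UpperRelationKind stage,
					RelOptInfo *input_rel, RelOptInfo *output_rel)
{
	if (prev_create_upper_paths_hook)
		prev_create_upper_paths_hook(root, stage, input_rel, output_rel);

	if (stage != UPPERREL_FINAL)
		return;

	/*
	 * output_rel->pathlist already holds the paths added by the foreach()
	 * loop above (with LockRows/Limit/ModifyTable steps as needed); an
	 * extension could add_path() an alternative of its own here.
	 */
}

void
_PG_init(void)
{
	prev_create_upper_paths_hook = create_upper_paths_hook;
	create_upper_paths_hook = example_upper_paths;
}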
  
  
index c67b7ab9976e0f7ef17beb7d9f386c04fd5afa56,d10a98396c7f75566ed0064ba9fa392b2bc54d7a..69c4da4c1689d32d35d01222f1c01b7ed90b2555
@@@ -4,8 -4,7 +4,8 @@@
   *      Post-processing of a completed plan tree: fix references to subplan
   *      vars, compute regproc values for operators, etc
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -1170,32 -1129,12 +1203,35 @@@ set_foreignscan_references(PlannerInfo 
  
        if (fscan->fdw_scan_tlist != NIL || fscan->scan.scanrelid == 0)
        {
-               /* Adjust tlist, qual, fdw_exprs to reference custom scan tuple */
+               /*
+                * Adjust tlist, qual, fdw_exprs, fdw_recheck_quals to reference
+                * foreign scan tuple
+                */
                indexed_tlist *itlist = build_tlist_index(fscan->fdw_scan_tlist);
  
 +#ifdef XCP
 +              fscan->scan.plan.targetlist = (List *)
 +                      fix_upper_expr(root,
 +                                                 (Node *) fscan->scan.plan.targetlist,
 +                                                 itlist,
 +                                                 INDEX_VAR,
 +                                                 rtoffset,
 +                                                 false);
 +              fscan->scan.plan.qual = (List *)
 +                      fix_upper_expr(root,
 +                                                 (Node *) fscan->scan.plan.qual,
 +                                                 itlist,
 +                                                 INDEX_VAR,
 +                                                 rtoffset,
 +                                                 false);
 +              fscan->fdw_exprs = (List *)
 +                      fix_upper_expr(root,
 +                                                 (Node *) fscan->fdw_exprs,
 +                                                 itlist,
 +                                                 INDEX_VAR,
 +                                                 rtoffset,
 +                                                 false);
 +#else         
                fscan->scan.plan.targetlist = (List *)
                        fix_upper_expr(root,
                                                   (Node *) fscan->scan.plan.targetlist,
                                                   itlist,
                                                   INDEX_VAR,
                                                   rtoffset);
 +#endif
+               fscan->fdw_recheck_quals = (List *)
+                       fix_upper_expr(root,
+                                                  (Node *) fscan->fdw_recheck_quals,
+                                                  itlist,
+                                                  INDEX_VAR,
+                                                  rtoffset);
                pfree(itlist);
                /* fdw_scan_tlist itself just needs fix_scan_list() adjustments */
                fscan->fdw_scan_tlist =
index b4c8c52ddec95af018a033a7c29549bb04c7444a,a46cc108203706e393465c5892f2cb1f6f73a6d1..3f0d6c378ef229704d5ef30917013c7609132bd2
@@@ -3,8 -3,7 +3,8 @@@
   * subselect.c
   *      Planning routines for subselects and parameters.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
@@@ -529,29 -529,10 +532,26 @@@ make_subplan(PlannerInfo *root, Query *
        /* plan_params should not be in use in current query level */
        Assert(root->plan_params == NIL);
  
-       /*
-        * Generate the plan for the subquery.
-        */
-       plan = subquery_planner(root->glob, subquery,
-                                                       root,
-                                                       false, tuple_fraction,
-                                                       &subroot);
+       /* Generate Paths for the subquery */
+       subroot = subquery_planner(root->glob, subquery,
+                                                          root,
+                                                          false, tuple_fraction);
 +#ifdef XCP
 +      if (subroot->distribution)
 +      {
 +              plan = (Plan *) make_remotesubplan(subroot,
 +                                                                                 plan,
 +                                                                                 NULL,
 +                                                                                 subroot->distribution,
 +                                                                                 subroot->query_pathkeys);
 +              /*
 +               * SS_finalize_plan has already been run on the subplan,
 +               * so we have to copy the parameter info to the wrapper plan node.
 +               */
 +              plan->extParam = bms_copy(plan->lefttree->extParam);
 +              plan->allParam = bms_copy(plan->lefttree->allParam);
 +      }
 +#endif
  
        /* Isolate the params needed by this specific subplan */
        plan_params = root->plan_params;
@@@ -1181,29 -1174,12 +1198,28 @@@ SS_process_ctes(PlannerInfo *root
                Assert(root->plan_params == NIL);
  
                /*
-                * Generate the plan for the CTE query.  Always plan for full
-                * retrieval --- we don't have enough info to predict otherwise.
+                * Generate Paths for the CTE query.  Always plan for full retrieval
+                * --- we don't have enough info to predict otherwise.
                 */
-               plan = subquery_planner(root->glob, subquery,
-                                                               root,
-                                                               cte->cterecursive, 0.0,
-                                                               &subroot);
+               subroot = subquery_planner(root->glob, subquery,
+                                                                  root,
+                                                                  cte->cterecursive, 0.0);
 +#ifdef XCP
 +              if (subroot->distribution)
 +              {
 +                      plan = (Plan *) make_remotesubplan(subroot,
 +                                                                                         plan,
 +                                                                                         NULL,
 +                                                                                         subroot->distribution,
 +                                                                                         subroot->query_pathkeys);
 +                      /*
 +                       * SS_finalize_plan has already been run on the subplan,
 +                       * so we have to copy the parameter info to the wrapper plan node.
 +                       */
 +                      plan->extParam = bms_copy(plan->lefttree->extParam);
 +                      plan->allParam = bms_copy(plan->lefttree->allParam);
 +              }
 +#endif
  
                /*
                 * Since the current query level doesn't yet contain any RTEs, it
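
The subselect.c hunks above track the 9.6 API change to subquery_planner(): it now returns the subquery's PlannerInfo (with Paths in its upperrels) instead of a finished Plan, and the XL-specific additions wrap the eventual subplan in a RemoteSubplan when the subquery carries a distribution. A minimal sketch of the new upstream calling convention, assuming the 9.6 helper functions named below and omitting the plan_params bookkeeping, looks like this (illustrative only, not the verbatim merged code):

#include "postgres.h"
#include "optimizer/pathnode.h"		/* fetch_upper_rel */
#include "optimizer/planmain.h"		/* create_plan */
#include "optimizer/planner.h"		/* subquery_planner, get_cheapest_fractional_path */

/* Illustrative sketch: plan a subquery and turn its best Path into a Plan. */
static Plan *
plan_subquery_sketch(PlannerInfo *root, Query *subquery, double tuple_fraction)
{
	PlannerInfo *subroot;
	RelOptInfo *final_rel;
	Path	   *best_path;

	/* Generate Paths for the subquery; the subroot is now the return value */
	subroot = subquery_planner(root->glob, subquery, root,
							   false, tuple_fraction);

	/* Pick the cheapest path from the subquery's final upperrel */
	final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
	best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);

	/* Only now is a Plan built, using the subquery's own PlannerInfo */
	return create_plan(subroot, best_path);
}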
index d8f5df88e3936e04a148eda36d48e6688f0efa27,a334f15773ad3fbed7cc96e145729bc2ba0c57c2..5fa672d02cc9388d3fb1b48fb0a301e499494571
@@@ -909,10 -908,15 +911,16 @@@ pull_up_simple_subquery(PlannerInfo *ro
        subroot->eq_classes = NIL;
        subroot->append_rel_list = NIL;
        subroot->rowMarks = NIL;
+       memset(subroot->upper_rels, 0, sizeof(subroot->upper_rels));
+       memset(subroot->upper_targets, 0, sizeof(subroot->upper_targets));
+       subroot->processed_tlist = NIL;
+       subroot->grouping_map = NULL;
+       subroot->minmax_aggs = NIL;
+       subroot->hasInheritedTarget = false;
        subroot->hasRecursion = false;
        subroot->wt_param_id = -1;
-       subroot->non_recursive_plan = NULL;
 +      subroot->recursiveOk = true;
+       subroot->non_recursive_path = NULL;
  
        /* No CTEs to worry about */
        Assert(subquery->cteList == NIL);
index 1c0c9ad9cbd673bec3bb9398334d7e7d828a2c9b,1c8d1052c58e0520c07130c9d0cea6966fc1cb3b..64cd7262d07a2ef8ca9e12cd438e5e78a8f459c5
@@@ -9,13 -9,25 +9,25 @@@
   * list and row ID information needed for SELECT FOR UPDATE locking and/or
   * EvalPlanQual checking.
   *
-  * NOTE: the rewriter's rewriteTargetListIU and rewriteTargetListUD
-  * routines also do preprocessing of the targetlist.  The division of labor
-  * between here and there is a bit arbitrary and historical.
+  * The rewriter's rewriteTargetListIU and rewriteTargetListUD routines
+  * also do preprocessing of the targetlist.  The division of labor between
+  * here and there is partially historical, but it's not entirely arbitrary.
+  * In particular, consider an UPDATE across an inheritance tree.  What the
+  * rewriter does need be done only once (because it depends only on the
+  * properties of the parent relation).  What's done here has to be done over
+  * again for each child relation, because it depends on the column list of
+  * the child, which might have more columns and/or a different column order
+  * than the parent.
   *
+  * The fact that rewriteTargetListIU sorts non-resjunk tlist entries by column
+  * position, which expand_targetlist depends on, violates the above comment
+  * because the sorting is only valid for the parent relation.  In inherited
+  * UPDATE cases, adjust_inherited_tlist runs in between to take care of fixing
+  * the tlists for child tables to keep expand_targetlist happy.  We do it like
+  * that because it's faster in typical non-inherited cases.
   *
 - *
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
  #include "access/sysattr.h"
  #include "catalog/pg_type.h"
  #include "nodes/makefuncs.h"
 +#ifdef XCP
 +#include "nodes/nodeFuncs.h"
 +#include "optimizer/clauses.h"
 +#endif
  #include "optimizer/prep.h"
  #include "optimizer/tlist.h"
+ #include "optimizer/var.h"
  #include "parser/parsetree.h"
  #include "parser/parse_coerce.h"
  #include "utils/rel.h"
index 84b503a6a8ed39e2dd54d88527a03e36da9aa1af,b7147832e0f82db304309ff0e90f996904716158..ad8058250fc9a4ad32b6ff07727bc26dc43dffcf
@@@ -17,8 -17,7 +17,8 @@@
   * append relations, and thenceforth share code with the UNION ALL case.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -243,30 -277,11 +278,28 @@@ recurse_set_operations(Node *setOp, Pla
                /* plan_params should not be in use in current query level */
                Assert(root->plan_params == NIL);
  
-               /*
-                * Generate plan for primitive subquery
-                */
-               subplan = subquery_planner(root->glob, subquery,
-                                                                  root,
-                                                                  false, tuple_fraction,
-                                                                  &subroot);
+               /* Generate a subroot and Paths for the subquery */
+               subroot = rel->subroot = subquery_planner(root->glob, subquery,
+                                                                                                 root,
+                                                                                                 false,
+                                                                                                 root->tuple_fraction);
 +#ifdef XCP
 +              if (subroot->distribution)
 +              {
 +                      subplan = (Plan *) make_remotesubplan(subroot,
 +                                                                                                subplan,
 +                                                                                                NULL,
 +                                                                                                subroot->distribution,
 +                                                                                                subroot->query_pathkeys);
 +              }
 +#endif
 +
 +              /* Save subroot and subplan in RelOptInfo for setrefs.c */
 +              rel->subplan = subplan;
 +              rel->subroot = subroot;
 +
 +              if (root->recursiveOk)  
 +                      root->recursiveOk = subroot->recursiveOk;
  
                /*
                 * It should not be possible for the primitive query to contain any
index f94cc3fb6bd852ff79fe4fa7991580e805b69dce,ce7ad545a95fcebc008f6a5254eaff3b1ac0d574..60976c9ea309cfe016a402e598379e13b4ed98d2
@@@ -3,8 -3,7 +3,8 @@@
   * pathnode.c
   *      Routines to manipulate pathlists and create path nodes
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -1694,23 -947,14 +1939,27 @@@ create_seqscan_path(PlannerInfo *root, 
  
        pathnode->pathtype = T_SeqScan;
        pathnode->parent = rel;
+       pathnode->pathtarget = rel->reltarget;
        pathnode->param_info = get_baserel_parampathinfo(root, rel,
                                                                                                         required_outer);
+       pathnode->parallel_aware = parallel_workers > 0 ? true : false;
+       pathnode->parallel_safe = rel->consider_parallel;
+       pathnode->parallel_workers = parallel_workers;
        pathnode->pathkeys = NIL;       /* seqscan has unordered result */
  
 +#ifdef XCP
 +      set_scanpath_distribution(root, rel, pathnode);
 +      if (rel->baserestrictinfo)
 +      {
 +              ListCell *lc;
 +              foreach (lc, rel->baserestrictinfo)
 +              {
 +                      RestrictInfo *ri = (RestrictInfo *) lfirst(lc);
 +                      restrict_distribution(root, ri, pathnode);
 +              }
 +      }
 +#endif
 +
        cost_seqscan(pathnode, root, rel, pathnode->param_info);
  
        return pathnode;
@@@ -1727,23 -971,14 +1976,27 @@@ create_samplescan_path(PlannerInfo *roo
  
        pathnode->pathtype = T_SampleScan;
        pathnode->parent = rel;
+       pathnode->pathtarget = rel->reltarget;
        pathnode->param_info = get_baserel_parampathinfo(root, rel,
                                                                                                         required_outer);
+       pathnode->parallel_aware = false;
+       pathnode->parallel_safe = rel->consider_parallel;
+       pathnode->parallel_workers = 0;
        pathnode->pathkeys = NIL;       /* samplescan has unordered result */
  
 +#ifdef XCP
 +      set_scanpath_distribution(root, rel, pathnode);
 +      if (rel->baserestrictinfo)
 +      {
 +              ListCell *lc;
 +              foreach (lc, rel->baserestrictinfo)
 +              {
 +                      RestrictInfo *ri = (RestrictInfo *) lfirst(lc);
 +                      restrict_distribution(root, ri, pathnode);
 +              }
 +      }
 +#endif
 +
        cost_samplescan(pathnode, root, rel, pathnode->param_info);
  
        return pathnode;
@@@ -1982,78 -1210,14 +2272,82 @@@ create_append_path(RelOptInfo *rel, Lis
  
        pathnode->path.pathtype = T_Append;
        pathnode->path.parent = rel;
+       pathnode->path.pathtarget = rel->reltarget;
        pathnode->path.param_info = get_appendrel_parampathinfo(rel,
                                                                                                                        required_outer);
+       pathnode->path.parallel_aware = false;
+       pathnode->path.parallel_safe = rel->consider_parallel;
+       pathnode->path.parallel_workers = parallel_workers;
        pathnode->path.pathkeys = NIL;          /* result is always considered
                                                                                 * unsorted */
 +#ifdef XCP
 +      /*
 +       * An Append path is used to implement scans of inherited tables and
 +       * some "set" operations, such as UNION ALL.  While all inherited tables
 +       * should have the same distribution, UNION'ed queries may have
 +       * different ones.  When the paths being appended all have the same
 +       * distribution it is OK to push the Append down to the data nodes;
 +       * otherwise perform a "coordinator" Append.
 +       */
 +
 +      /* Special case of the dummy relation, if the subpaths list is empty */
 +      if (subpaths)
 +      {
 +              /* Take distribution of the first node */
 +              l = list_head(subpaths);
 +              subpath = (Path *) lfirst(l);
 +              distribution = copyObject(subpath->distribution);
 +              /*
 +               * Check the remaining subpaths; if all distributions are equal to the
 +               * first, set it as the distribution of the Append path, otherwise make
 +               * a coordinator Append.
 +               */
 +              while ((l = lnext(l)))
 +              {
 +                      subpath = (Path *) lfirst(l);
 +
 +                      /*
 +                       * For Append and MergeAppend paths, we are most often dealing with
 +                       * different relations appended together, so it is very likely that
 +                       * the distribution for each relation will have a different varno.
 +                       * But we should be able to push down Append and MergeAppend as
 +                       * long as the rest of the distribution information matches.
 +                       *
 +                       * equalDistribution() compares everything except the varnos.
 +                       */
 +                      if (equalDistribution(distribution, subpath->distribution))
 +                      {
 +                              /*
 +                               * Both distribution and subpath->distribution may be NULL at
 +                               * this point, or they both are not null.
 +                               */
 +                              if (distribution && subpath->distribution->restrictNodes)
 +                                      distribution->restrictNodes = bms_union(
 +                                                      distribution->restrictNodes,
 +                                                      subpath->distribution->restrictNodes);
 +                      }
 +                      else
 +                      {
 +                              break;
 +                      }
 +              }
 +              if (l)
 +              {
 +                      List *newsubpaths = NIL;
 +                      foreach(l, subpaths)
 +                      {
 +                              subpath = (Path *) lfirst(l);
 +                              if (subpath->distribution)
 +                                      subpath = redistribute_path(subpath, LOCATOR_TYPE_NONE,
 +                                                                                              NULL, NULL, NULL);
 +                              newsubpaths = lappend(newsubpaths, subpath);
 +                      }
 +                      subpaths = newsubpaths;
 +                      pathnode->path.distribution = NULL;
 +              }
 +              else
 +                      pathnode->path.distribution = distribution;
 +      }
 +#endif
        pathnode->subpaths = subpaths;
  
        /*
@@@ -2107,68 -1269,12 +2403,72 @@@ create_merge_append_path(PlannerInfo *r
  
        pathnode->path.pathtype = T_MergeAppend;
        pathnode->path.parent = rel;
 +#ifdef XCP
 +      /*
 +       * It is safe to push down MergeAppend if all subpath distributions
 +       * are the same and these distributions are either Replicated or the
 +       * distribution key is the expression of the first pathkey.
 +       */
 +      /* Take distribution of the first node */
 +      l = list_head(subpaths);
 +      subpath = (Path *) lfirst(l);
 +      distribution = copyObject(subpath->distribution);
 +      /*
 +       * Verify whether it is safe to push down MergeAppend with this
 +       * distribution.
 +       * TODO: implement the check of the second condition (distribution key
 +       * is the first pathkey).
 +       */
 +      if (distribution == NULL || IsLocatorReplicated(distribution->distributionType))
 +      {
 +              /*
 +               * Check the remaining subpaths; if all distributions are equal to the
 +               * first, set it as the distribution of the MergeAppend path, otherwise
 +               * make a coordinator Append.
 +               */
 +              while ((l = lnext(l)))
 +              {
 +                      subpath = (Path *) lfirst(l);
 +
 +                      /*
 +                       * See comments in Append path
 +                       */
 +                      if (distribution && equalDistribution(distribution, subpath->distribution))
 +                      {
 +                              if (subpath->distribution->restrictNodes)
 +                                      distribution->restrictNodes = bms_union(
 +                                                      distribution->restrictNodes,
 +                                                      subpath->distribution->restrictNodes);
 +                      }
 +                      else
 +                      {
 +                              break;
 +                      }
 +              }
 +      }
 +      if (l)
 +      {
 +              List *newsubpaths = NIL;
 +              foreach(l, subpaths)
 +              {
 +                      subpath = (Path *) lfirst(l);
 +                      if (subpath->distribution)
 +                              subpath = redistribute_path(subpath, LOCATOR_TYPE_NONE,
 +                                                                                      NULL, NULL, NULL);
 +                      newsubpaths = lappend(newsubpaths, subpath);
 +              }
 +              subpaths = newsubpaths;
 +              pathnode->path.distribution = NULL;
 +      }
 +      else
 +              pathnode->path.distribution = distribution;
 +#endif
 +
+       pathnode->path.pathtarget = rel->reltarget;
        pathnode->path.param_info = get_appendrel_parampathinfo(rel,
                                                                                                                        required_outer);
+       pathnode->path.parallel_aware = false;
+       pathnode->path.parallel_safe = rel->consider_parallel;
+       pathnode->path.parallel_workers = 0;
        pathnode->path.pathkeys = pathkeys;
        pathnode->subpaths = subpaths;
  
@@@ -2540,33 -1659,70 +2862,79 @@@ translate_sub_tlist(List *tlist, int re
        return result;
  }
  
+ /*
+  * create_gather_path
+  *      Creates a path corresponding to a gather scan, returning the
+  *      pathnode.
+  *
+  * 'rows' may optionally be set to override row estimates from other sources.
+  */
+ GatherPath *
+ create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+                                  PathTarget *target, Relids required_outer, double *rows)
+ {
+       GatherPath *pathnode = makeNode(GatherPath);
+       Assert(subpath->parallel_safe);
+       pathnode->path.pathtype = T_Gather;
+       pathnode->path.parent = rel;
+       pathnode->path.pathtarget = target;
+       pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+                                                                                                                 required_outer);
+       pathnode->path.parallel_aware = false;
+       pathnode->path.parallel_safe = false;
+       pathnode->path.parallel_workers = subpath->parallel_workers;
+       pathnode->path.pathkeys = NIL;          /* Gather has unordered result */
+       pathnode->subpath = subpath;
+       pathnode->single_copy = false;
+       if (pathnode->path.parallel_workers == 0)
+       {
+               pathnode->path.parallel_workers = 1;
+               pathnode->path.pathkeys = subpath->pathkeys;
+               pathnode->single_copy = true;
+       }
+       cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
+       return pathnode;
+ }
  /*
   * create_subqueryscan_path
-  *      Creates a path corresponding to a sequential scan of a subquery,
+  *      Creates a path corresponding to a scan of a subquery,
   *      returning the pathnode.
   */
- Path *
+ SubqueryScanPath *
 +#ifdef XCP
- create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
++create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
 +                                               List *pathkeys, Relids required_outer,
 +                                               Distribution *distribution)
 +#else
- create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
+ create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
                                                 List *pathkeys, Relids required_outer)
 +#endif
  {
-       Path       *pathnode = makeNode(Path);
+       SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
  
-       pathnode->pathtype = T_SubqueryScan;
-       pathnode->parent = rel;
-       pathnode->param_info = get_baserel_parampathinfo(root, rel,
-                                                                                                        required_outer);
-       pathnode->pathkeys = pathkeys;
 +#ifdef XCP
 +      pathnode->distribution = distribution;
 +#endif
+       pathnode->path.pathtype = T_SubqueryScan;
+       pathnode->path.parent = rel;
+       pathnode->path.pathtarget = rel->reltarget;
+       pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+                                                                                                                 required_outer);
+       pathnode->path.parallel_aware = false;
+       pathnode->path.parallel_safe = rel->consider_parallel &&
+               subpath->parallel_safe;
+       pathnode->path.parallel_workers = subpath->parallel_workers;
+       pathnode->path.pathkeys = pathkeys;
+       pathnode->subpath = subpath;
  
-       cost_subqueryscan(pathnode, root, rel, pathnode->param_info);
+       cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info);
  
        return pathnode;
  }
@@@ -3082,13 -3195,15 +4472,28 @@@ reparameterize_path(PlannerInfo *root, 
                                                                                                                loop_count);
                        }
                case T_SubqueryScan:
-                       return create_subqueryscan_path(root, rel, path->pathkeys,
-                                                                                       required_outer, path->distribution);
 +#ifdef XCP
-                       return create_subqueryscan_path(root, rel, path->pathkeys,
-                                                                                       required_outer);
++                      {
++                              SubqueryScanPath *spath = (SubqueryScanPath *) path;
++
++                              return (Path *) create_subqueryscan_path(root,
++                                                                                                               rel,
++                                                                                                               spath->subpath,
++                                                                                                               spath->path.pathkeys,
++                                                                                                               required_outer,
++                                                                                                               path->distribution);
++                      }
 +#else
+                       {
+                               SubqueryScanPath *spath = (SubqueryScanPath *) path;
+                               return (Path *) create_subqueryscan_path(root,
+                                                                                                                rel,
+                                                                                                                spath->subpath,
+                                                                                                                spath->path.pathkeys,
+                                                                                                                required_outer);
+                       }
 +#endif
                default:
                        break;
        }
index 17f3d77b31f26991f62d3a86538ec2e252a4d7ec,5d18206b91b3527e1e9f8741545b1fefea6cfde0..2b50919b10887bce1f535f5b943962ae701cbbd7
@@@ -4,8 -4,7 +4,8 @@@
   *       routines for accessing the system catalogs
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index 60084581b03d073e82957ed4bda3c5f69a29767d,806600ed107d1a0c5db1081190e78921556cce1d..bdc8a5134cd737322fd1793882efddf443a3ed52
  #include "optimizer/placeholder.h"
  #include "optimizer/plancat.h"
  #include "optimizer/restrictinfo.h"
+ #include "optimizer/tlist.h"
  #include "utils/hsearch.h"
  
 +#ifdef PGXC
 +#include "pgxc/pgxc.h"
 +#endif
  
  typedef struct JoinHashEntry
  {
index edcf81ccdd6b5ef8a16c56c136cd1e8676295774,eac86cce3ee595077af91fe1dc15db0b10435899..90603dd5e556cedda9f986b6012ec39f4ca96580
@@@ -14,8 -14,7 +14,8 @@@
   * contain optimizable statements, which we should transform.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *    src/backend/parser/analyze.c
@@@ -98,17 -73,12 +99,20 @@@ static Query *transformExplainStmt(Pars
                                         ExplainStmt *stmt);
  static Query *transformCreateTableAsStmt(ParseState *pstate,
                                                   CreateTableAsStmt *stmt);
 +#ifdef PGXC
 +static Query *transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt);
 +#endif
 +
  static void transformLockingClause(ParseState *pstate, Query *qry,
                                           LockingClause *lc, bool pushedDown);
+ #ifdef RAW_EXPRESSION_COVERAGE_TEST
+ static bool test_raw_expression_coverage(Node *node, void *context);
+ #endif
  
 +#ifdef XCP
 +static void ParseAnalyze_rtable_walk(List *rtable);
 +static void ParseAnalyze_substitute_func(FuncExpr *funcexpr);
 +#endif
  
  /*
   * parse_analyze
@@@ -2880,93 -2785,24 +3005,115 @@@ applyLockingClause(Query *qry, Index rt
        qry->rowMarks = lappend(qry->rowMarks, rc);
  }
  
 +#ifdef XCP
 +post_parse_analyze_hook_type prev_ParseAnalyze_callback;
 +
 +/*
 + * Check if the query contains references to any pg_catalog tables that should
 + * be remapped to storm_catalog. The list is obtained from the
 + * storm_catalog_remap_string GUC. Also do this only for normal users
 + */
 +void
 +ParseAnalyze_callback(ParseState *pstate, Query *query)
 +{
 +      if (prev_ParseAnalyze_callback)
 +              prev_ParseAnalyze_callback(pstate, query);
 +
 +      if (query && query->commandType == CMD_UTILITY)
 +              return;
 +      
 +      ParseAnalyze_rtable_walk(query->rtable);
 +}
 +
 +static void
 +ParseAnalyze_rtable_walk(List *rtable)
 +{
 +      ListCell                *item;
 +
 +      if (!IsUnderPostmaster || superuser())
 +              return;
 +
 +      foreach(item, rtable)
 +      {
 +              RangeTblEntry *rte = (RangeTblEntry *) lfirst(item);
 +
 +              if (rte->rtekind == RTE_FUNCTION)
 +              {
 +                      ListCell                *lc;
 +                      foreach(lc, rte->functions)
 +                      {
 +                              RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
 +                              ParseAnalyze_substitute_func((FuncExpr *) rtfunc->funcexpr);
 +                      }
 +              }
 +              else if (rte->rtekind == RTE_SUBQUERY) /* recurse for subqueries */
 +                               ParseAnalyze_rtable_walk(rte->subquery->rtable);
 +      }
 +}
 +
 +static void
 +ParseAnalyze_substitute_func(FuncExpr *funcexpr)
 +{
 +      StringInfoData  buf;
 +      initStringInfo(&buf);
 +
 +      if (get_func_namespace(funcexpr->funcid) == PG_CATALOG_NAMESPACE)
 +      {
 +              Oid funcid = InvalidOid;
 +              const char *funcname = get_func_name(funcexpr->funcid);
 +
 +              /* Check if the funcname is in storm_catalog_remap_string */
 +              appendStringInfoString(&buf, funcname);
 +              appendStringInfoChar(&buf, ',');
 +
 +              elog(DEBUG2, "the constructed name is %s", buf.data);
 +
 +              /*
 +               * The unqualified function name should be satisfied from the
 +               * storm_catalog appropriately. Just provide a warning for now if
 +               * it is not..
 +               */
 +              if (strstr(storm_catalog_remap_string, buf.data))
 +              {
 +                      Oid *argtypes = NULL;
 +                      int nargs;
 +
 +                      get_func_signature(funcexpr->funcid, &argtypes, &nargs);
 +                      funcid = get_funcid(funcname, buildoidvector(argtypes, nargs),
 +                                      STORM_CATALOG_NAMESPACE);
 +              }
 +              else
 +                      return;
 +
 +              if (get_func_namespace(funcid) != STORM_CATALOG_NAMESPACE)
 +                      ereport(WARNING,
 +                                      (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
 +                                       errmsg("Entry (%s) present in storm_catalog_remap_string "
 +                                               "but object not picked from STORM_CATALOG", funcname)));
 +              else /* change the funcid to the storm_catalog one */
 +                      funcexpr->funcid = funcid;
 +      }
 +}
 +#endif
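For context, a callback of this shape is chained into post_parse_analyze_hook at initialization time; the install site is outside this hunk, but it would look roughly like the following sketch.

	/* Sketch of the hook installation (actual location not shown in this diff). */
	prev_ParseAnalyze_callback = post_parse_analyze_hook;
	post_parse_analyze_hook = ParseAnalyze_callback;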
++
+ /*
+  * Coverage testing for raw_expression_tree_walker().
+  *
+  * When enabled, we run raw_expression_tree_walker() over every DML statement
+  * submitted to parse analysis.  Without this provision, that function is only
+  * applied in limited cases involving CTEs, and we don't really want to have
+  * to test everything inside as well as outside a CTE.
+  */
+ #ifdef RAW_EXPRESSION_COVERAGE_TEST
+ static bool
+ test_raw_expression_coverage(Node *node, void *context)
+ {
+       if (node == NULL)
+               return false;
+       return raw_expression_tree_walker(node,
+                                                                         test_raw_expression_coverage,
+                                                                         context);
+ }
+ #endif   /* RAW_EXPRESSION_COVERAGE_TEST */
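The walker above only has an effect if something invokes it on each raw statement; a hypothetical call site inside parse analysis (the parseTree variable name is assumed) would sit behind the same macro:

#ifdef RAW_EXPRESSION_COVERAGE_TEST
	/* Hypothetical call site: run the coverage walker over the raw parse tree. */
	(void) test_raw_expression_coverage((Node *) parseTree, NULL);
#endif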
index 677b40c0d5473cfa28cb214d5ce0b05b2ffe8ec1,6a0f7b393cb9655289cc647bfbc347142d8e2fa9..796292537f56eb2e1e7409dbb60910b509738165
@@@ -6,9 -6,8 +6,9 @@@
   * gram.y
   *      POSTGRESQL BISON rules/actions
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
  
  #include "catalog/index.h"
  #include "catalog/namespace.h"
+ #include "catalog/pg_am.h"
  #include "catalog/pg_trigger.h"
  #include "commands/defrem.h"
 +#include "miscadmin.h"
  #include "commands/trigger.h"
  #include "nodes/makefuncs.h"
  #include "nodes/nodeFuncs.h"
@@@ -280,9 -265,7 +282,9 @@@ static Node *makeRecursiveViewSelect(ch
                DeallocateStmt PrepareStmt ExecuteStmt
                DropOwnedStmt ReassignOwnedStmt
                AlterTSConfigurationStmt AlterTSDictionaryStmt
-               CreateMatViewStmt RefreshMatViewStmt
 +              BarrierStmt PauseStmt AlterNodeStmt CreateNodeStmt DropNodeStmt
 +              CreateNodeGroupStmt DropNodeGroupStmt
+               CreateMatViewStmt RefreshMatViewStmt CreateAmStmt
  
  %type <node>  select_no_parens select_with_parens select_clause
                                simple_select values_clause
        CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR CYCLE
  
        DATA_P DATABASE DAY_P DEALLOCATE DEC DECIMAL_P DECLARE DEFAULT DEFAULTS
-       DEFERRABLE DEFERRED DEFINER DELETE_P DELIMITER DELIMITERS DESC
+       DEFERRABLE DEFERRED DEFINER DELETE_P DELIMITER DELIMITERS DEPENDS DESC
 -      DICTIONARY DISABLE_P DISCARD DISTINCT DO DOCUMENT_P DOMAIN_P DOUBLE_P DROP
 +/* PGXC_BEGIN */
 +      DICTIONARY DIRECT DISABLE_P DISCARD DISTINCT DISTKEY DISTRIBUTE DISTRIBUTED
 +      DISTSTYLE DO DOCUMENT_P DOMAIN_P DOUBLE_P
 +/* PGXC_END */
 +      DROP
  
        EACH ELSE ENABLE_P ENCODING ENCRYPTED END_P ENUM_P ESCAPE EVENT EXCEPT
        EXCLUDE EXCLUDING EXCLUSIVE EXECUTE EXISTS EXPLAIN
        LEADING LEAKPROOF LEAST LEFT LEVEL LIKE LIMIT LISTEN LOAD LOCAL
        LOCALTIME LOCALTIMESTAMP LOCATION LOCK_P LOCKED LOGGED
  
-       MAPPING MATCH MATERIALIZED MAXVALUE MINUTE_P MINVALUE MODE MONTH_P MOVE
+       MAPPING MATCH MATERIALIZED MAXVALUE METHOD MINUTE_P MINVALUE MODE MONTH_P MOVE
  
 -      NAME_P NAMES NATIONAL NATURAL NCHAR NEXT NO NONE
 +      NAME_P NAMES NATIONAL NATURAL NCHAR NEXT NO NODE NONE
        NOT NOTHING NOTIFY NOTNULL NOWAIT NULL_P NULLIF
        NULLS_P NUMERIC
  
        OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTION OPTIONS OR
        ORDER ORDINALITY OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER
  
-       PARSER PARTIAL PARTITION PASSING PASSWORD PAUSE PLACING PLANS POLICY POSITION
- /* PGXC_BEGIN */
-       PRECEDING PRECISION PREFERRED PRESERVE PREPARE PREPARED PRIMARY
- /* PGXC_END */
 -      PARALLEL PARSER PARTIAL PARTITION PASSING PASSWORD PLACING PLANS POLICY
 -      POSITION PRECEDING PRECISION PRESERVE PREPARE PREPARED PRIMARY
++      PARALLEL PARSER PARTIAL PARTITION PASSING PASSWORD PAUSE PLACING PLANS POLICY
++      POSITION PRECEDING PRECISION PREFERRED PRESERVE PREPARE PREPARED PRIMARY
        PRIOR PRIVILEGES PROCEDURAL PROCEDURE PROGRAM
  
        QUOTE
@@@ -852,9 -768,10 +851,11 @@@ stmt 
                        | AlterForeignTableStmt
                        | AlterFunctionStmt
                        | AlterGroupStmt
 +                      | AlterNodeStmt
+                       | AlterObjectDependsStmt
                        | AlterObjectSchemaStmt
                        | AlterOwnerStmt
+                       | AlterOperatorStmt
                        | AlterPolicyStmt
                        | AlterSeqStmt
                        | AlterSystemStmt
@@@ -14228,16 -13784,10 +14375,17 @@@ unreserved_keyword
                        | DELETE_P
                        | DELIMITER
                        | DELIMITERS
+                       | DEPENDS
                        | DICTIONARY
 +                      | DIRECT
                        | DISABLE_P
                        | DISCARD
 +/* PGXC_BEGIN */
 +                      | DISTKEY
 +                      | DISTRIBUTE
 +                      | DISTRIBUTED
 +                      | DISTSTYLE
 +/* PGXC_END */
                        | DOCUMENT_P
                        | DOMAIN_P
                        | DOUBLE_P
index 4812fb1a67f89a41a319b452cfaf90f8e72dcbe8,481a4ddc4847d49d73bef4181a891210248e7721..6bb7c698e7f663ff0d423a5b084b021bf12e4cbb
@@@ -3,8 -3,7 +3,8 @@@
   * parse_agg.c
   *      handle aggregates and window functions in parser
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -1842,40 -1844,27 +1852,30 @@@ resolve_aggregate_transtype(Oid aggfunc
   * For an ordered-set aggregate, remember that agg_input_types describes
   * the direct arguments followed by the aggregated arguments.
   *
-  * transfn_oid, invtransfn_oid and finalfn_oid identify the funcs to be
-  * called; the latter two may be InvalidOid.
+  * transfn_oid and invtransfn_oid identify the funcs to be called; the
+  * latter may be InvalidOid, however if invtransfn_oid is set then
+  * transfn_oid must also be set.
   *
   * Pointers to the constructed trees are returned into *transfnexpr,
-  * *invtransfnexpr and *finalfnexpr. If there is no invtransfn or finalfn,
-  * the respective pointers are set to NULL.  Since use of the invtransfn is
-  * optional, NULL may be passed for invtransfnexpr.
+  * *invtransfnexpr. If there is no invtransfn, the respective pointer is set
+  * to NULL.  Since use of the invtransfn is optional, NULL may be passed for
+  * invtransfnexpr.
   */
  void
- build_aggregate_fnexprs(Oid *agg_input_types,
-                                               int agg_num_inputs,
-                                               int agg_num_direct_inputs,
-                                               int num_finalfn_inputs,
-                                               bool agg_variadic,
-                                               Oid agg_state_type,
- #ifdef XCP
-                                               Oid agg_collect_type,
- #endif
-                                               Oid agg_result_type,
-                                               Oid agg_input_collation,
-                                               Oid transfn_oid,
- #ifdef XCP
-                                               Oid collectfn_oid,
- #endif
-                                               Oid invtransfn_oid,
-                                               Oid finalfn_oid,
-                                               Expr **transfnexpr,
-                                               Expr **invtransfnexpr,
- #ifdef XCP
-                                               Expr **collectfnexpr,
- #endif
-                                               Expr **finalfnexpr)
+ build_aggregate_transfn_expr(Oid *agg_input_types,
+                                                        int agg_num_inputs,
+                                                        int agg_num_direct_inputs,
+                                                        bool agg_variadic,
+                                                        Oid agg_state_type,
++                                                       Oid agg_collect_type,
+                                                        Oid agg_input_collation,
+                                                        Oid transfn_oid,
++                                                       Oid collectfn_oid,
+                                                        Oid invtransfn_oid,
+                                                        Expr **transfnexpr,
 -                                                       Expr **invtransfnexpr)
++                                                       Expr **invtransfnexpr,
++                                                       Expr **collectfnexpr)
  {
-       Param      *argp;
        List       *args;
        FuncExpr   *fexpr;
        int                     i;
                else
                        *invtransfnexpr = NULL;
        }
 +#ifdef XCP
 +      /* see if we have a collect function */
 +      if (OidIsValid(collectfn_oid))
 +      {
 +              Param      *argp2;
 +              /*
 +               * Build expr tree for collect function
 +               */
 +              argp = makeNode(Param);
 +              argp->paramkind = PARAM_EXEC;
 +              argp->paramid = -1;
 +              argp->paramtype = agg_collect_type;
 +              argp->paramtypmod = -1;
 +              argp->location = -1;
 +
 +              argp2 = makeNode(Param);
 +              argp2->paramkind = PARAM_EXEC;
 +              argp2->paramid = -1;
 +              argp2->paramtype = agg_state_type;
 +              argp2->paramtypmod = -1;
 +              argp2->location = -1;
 +              args = list_make2(argp, argp2);
 +
 +              *collectfnexpr = (Expr *) makeFuncExpr(collectfn_oid,
 +                                                                                       agg_collect_type,
 +                                                                                       args,
 +                                                                                       InvalidOid,
 +                                                                                       agg_input_collation,
 +                                                                                       COERCE_EXPLICIT_CALL);
 +      }
 +      else
 +              *collectfnexpr = NULL;
 +#endif
 +
 +      /* see if we have a final function */
 +      if (!OidIsValid(finalfn_oid))
 +      {
 +              *finalfnexpr = NULL;
 +              return;
 +      }
+ }
+ /*
+  * Like build_aggregate_transfn_expr, but creates an expression tree for the
+  * combine function of an aggregate, rather than the transition function.
+  */
+ void
+ build_aggregate_combinefn_expr(Oid agg_state_type,
+                                                          Oid agg_input_collation,
+                                                          Oid combinefn_oid,
+                                                          Expr **combinefnexpr)
+ {
+       Node       *argp;
+       List       *args;
+       FuncExpr   *fexpr;
+       /* combinefn takes two arguments of the aggregate state type */
+       argp = make_agg_arg(agg_state_type, agg_input_collation);
+       args = list_make2(argp, argp);
+       fexpr = makeFuncExpr(combinefn_oid,
+                                                agg_state_type,
+                                                args,
+                                                InvalidOid,
+                                                agg_input_collation,
+                                                COERCE_EXPLICIT_CALL);
+       /* combinefn is currently never treated as variadic */
+       *combinefnexpr = (Expr *) fexpr;
+ }
+ /*
+  * Like build_aggregate_transfn_expr, but creates an expression tree for the
+  * serialization function of an aggregate.
+  */
+ void
+ build_aggregate_serialfn_expr(Oid serialfn_oid,
+                                                         Expr **serialfnexpr)
+ {
+       List       *args;
+       FuncExpr   *fexpr;
+       /* serialfn always takes INTERNAL and returns BYTEA */
+       args = list_make1(make_agg_arg(INTERNALOID, InvalidOid));
+       fexpr = makeFuncExpr(serialfn_oid,
+                                                BYTEAOID,
+                                                args,
+                                                InvalidOid,
+                                                InvalidOid,
+                                                COERCE_EXPLICIT_CALL);
+       *serialfnexpr = (Expr *) fexpr;
+ }
+ /*
+  * Like build_aggregate_transfn_expr, but creates an expression tree for the
+  * deserialization function of an aggregate.
+  */
+ void
+ build_aggregate_deserialfn_expr(Oid deserialfn_oid,
+                                                               Expr **deserialfnexpr)
+ {
+       List       *args;
+       FuncExpr   *fexpr;
+       /* deserialfn always takes BYTEA, INTERNAL and returns INTERNAL */
+       args = list_make2(make_agg_arg(BYTEAOID, InvalidOid),
+                                         make_agg_arg(INTERNALOID, InvalidOid));
+       fexpr = makeFuncExpr(deserialfn_oid,
+                                                INTERNALOID,
+                                                args,
+                                                InvalidOid,
+                                                InvalidOid,
+                                                COERCE_EXPLICIT_CALL);
+       *deserialfnexpr = (Expr *) fexpr;
+ }
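These builders are intended to be invoked from the executor's aggregate setup; a hedged sketch of such a caller, assuming aggform points at the aggregate's pg_aggregate tuple, might read:

	Expr	   *serialfnexpr = NULL;
	Expr	   *deserialfnexpr = NULL;

	/* Sketch: build (de)serialization expressions only when the aggregate defines them. */
	if (OidIsValid(aggform->aggserialfn))
		build_aggregate_serialfn_expr(aggform->aggserialfn, &serialfnexpr);
	if (OidIsValid(aggform->aggdeserialfn))
		build_aggregate_deserialfn_expr(aggform->aggdeserialfn, &deserialfnexpr);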
+ /*
+  * Like build_aggregate_transfn_expr, but creates an expression tree for the
+  * final function of an aggregate, rather than the transition function.
+  */
+ void
+ build_aggregate_finalfn_expr(Oid *agg_input_types,
+                                                        int num_finalfn_inputs,
+                                                        Oid agg_state_type,
+                                                        Oid agg_result_type,
+                                                        Oid agg_input_collation,
+                                                        Oid finalfn_oid,
+                                                        Expr **finalfnexpr)
+ {
+       List       *args;
+       int                     i;
  
        /*
         * Build expr tree for final function
         */
-       argp = makeNode(Param);
-       argp->paramkind = PARAM_EXEC;
-       argp->paramid = -1;
-       /*
-        * When running Phase 2 of distributed aggregation we may have only
-        * transient and final functions defined.
-        */
 +#ifdef XCP
 +      if (OidIsValid(agg_collect_type))
 +              argp->paramtype = agg_collect_type;
 +      else
 +#endif
-       argp->paramtype = agg_state_type;
-       argp->paramtypmod = -1;
-       argp->paramcollid = agg_input_collation;
-       argp->location = -1;
-       args = list_make1(argp);
+       args = list_make1(make_agg_arg(agg_state_type, agg_input_collation));
  
        /* finalfn may take additional args, which match agg's input types */
        for (i = 0; i < num_finalfn_inputs - 1; i++)
Simple merge
Simple merge
Simple merge
index 714e89e3ebceb8516de4c3e202a1dc44994f61d5,1e3ecbc51ef288a9c4462662614bc463600028b0..c10e272d72866b8906e1d0254dfd0b97c198729c
@@@ -3,8 -3,7 +3,8 @@@
   * parse_relation.c
   *      parser support routines dealing with relations
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
Simple merge
Simple merge
index 0e899f1997716103013a78d48598d822a0869c57,e98fad051e4e915679a25e4815c4d385a4c7b159..707a1c5b60ff3472c088d2ddab8f8e6c7d8735fc
   * a quick copyObject() call before manipulating the query tree.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *    src/backend/parser/parse_utilcmd.c
   *
Simple merge
Simple merge
index e3ce6c8c2585107852fcf4f7d2152830b4e2967e,3768f50bcf3aa2b2700c2e586e117d1ff3465399..1b5328e479771d7b9da390b23c1e30061fb03876
@@@ -50,8 -50,7 +50,8 @@@
   * there is a window (caused by pgstat delay) on which a worker may choose a
   * table that was already vacuumed; this is a bug in the current design.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index 7be5f28d47683fd21fc3a975794b360ad8136a53,8fa9edbf7293799c67be691950a3bc99a03dbe67..181f14ee7444d2d153b595163faf8c99f5403bdb
@@@ -11,8 -11,7 +11,8 @@@
   *                    - Add a pgstat config column to pg_database, so this
   *                      entire thing can be enabled/disabled on a per db basis.
   *
-  *    Copyright (c) 2001-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  *    Copyright (c) 2001-2016, PostgreSQL Global Development Group
   *
   *    src/backend/postmaster/pgstat.c
   * ----------
index 1ca68e69c01288747f4c56b61d49cd78d1c3996e,f5c8e9d812c234b1b8c61c06ad879ab781b4b5a1..520616e4496e129033bf260975dba3c518417a97
   *      clients.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
@@@ -1348,19 -1292,9 +1394,20 @@@ PostmasterMain(int argc, char *argv[]
         */
        StartupPID = StartupDataBase();
        Assert(StartupPID != 0);
+       StartupStatus = STARTUP_RUNNING;
        pmState = PM_STARTUP;
  
 +#ifdef PGXC /* PGXC_COORD */
 +      oldcontext = MemoryContextSwitchTo(TopMemoryContext);
 +
 +      /*
 +       * Initialize the Data Node connection pool
 +       */
 +      PgPoolerPID = StartPoolManager();
 +
 +      MemoryContextSwitchTo(oldcontext);
 +#endif /* PGXC */
 +
        /* Some workers may be scheduled to start now */
        maybe_start_bgworker();
  
@@@ -1777,21 -1748,9 +1861,21 @@@ ServerLoop(void
                if (PgStatPID == 0 && pmState == PM_RUN)
                        PgStatPID = pgstat_start();
  
 +#ifdef PGXC
 +              /* If we have lost the pooler, try to start a new one */
 +              if (PgPoolerPID == 0 && pmState == PM_RUN)
 +                      PgPoolerPID = StartPoolManager();
 +#endif /* PGXC */
 +
 +#ifdef XCP
 +              /* If we have lost the cluster monitor, try to start a new one */
 +              if (ClusterMonPID == 0 && pmState == PM_RUN)
 +                      ClusterMonPID = StartClusterMonitor();
 +#endif
 +
                /* If we have lost the archiver, try to start a new one. */
                if (PgArchPID == 0 && PgArchStartupAllowed())
-                               PgArchPID = pgarch_start();
+                       PgArchPID = pgarch_start();
  
                /* If we need to signal the autovacuum launcher, do so now */
                if (avlauncher_needs_signal)
@@@ -2483,17 -2474,8 +2599,16 @@@ SIGHUP_handler(SIGNAL_ARGS
                                (errmsg("received SIGHUP, reloading configuration files")));
                ProcessConfigFile(PGC_SIGHUP);
                SignalChildren(SIGHUP);
-               SignalUnconnectedWorkers(SIGHUP);
                if (StartupPID != 0)
                        signal_child(StartupPID, SIGHUP);
 +#ifdef PGXC /* PGXC_COORD */
 +              if (PgPoolerPID != 0)
 +                      signal_child(PgPoolerPID, SIGHUP);
 +#endif /* PGXC */
 +#ifdef XCP
 +              if (ClusterMonPID != 0)
 +                      signal_child(ClusterMonPID, SIGHUP);
 +#endif
                if (BgWriterPID != 0)
                        signal_child(BgWriterPID, SIGHUP);
                if (CheckpointerPID != 0)
@@@ -2628,19 -2607,12 +2748,20 @@@ pmdie(SIGNAL_ARGS
                                signal_child(BgWriterPID, SIGTERM);
                        if (WalReceiverPID != 0)
                                signal_child(WalReceiverPID, SIGTERM);
-                       SignalUnconnectedWorkers(SIGTERM);
 +#ifdef XCP
 +                      /* and the pool manager too */
 +                      if (PgPoolerPID != 0)
 +                              signal_child(PgPoolerPID, SIGTERM);
 +                      /* and the cluster monitor too */
 +                      if (ClusterMonPID != 0)
 +                              signal_child(ClusterMonPID, SIGTERM);
 +#endif /* XCP */
                        if (pmState == PM_RECOVERY)
                        {
+                               SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER);
                                /*
-                                * Only startup, bgwriter, walreceiver, unconnected bgworkers,
+                                * Only startup, bgwriter, walreceiver, possibly bgworkers,
                                 * and/or checkpointer should be active in this state; we just
                                 * signaled the first four, and we don't want to kill
                                 * checkpointer yet.
@@@ -3654,14 -3570,7 +3784,13 @@@ PostmasterStateMachine(void
                 * process.
                 */
                if (CountChildren(BACKEND_TYPE_NORMAL | BACKEND_TYPE_WORKER) == 0 &&
-                       CountUnconnectedWorkers() == 0 &&
                        StartupPID == 0 &&
 +#ifdef PGXC
 +                      PgPoolerPID == 0 &&
 +#endif
 +#ifdef XCP
 +                      ClusterMonPID == 0 &&
 +#endif
                        WalReceiverPID == 0 &&
                        BgWriterPID == 0 &&
                        (CheckpointerPID == 0 ||
@@@ -3972,15 -3844,11 +4070,19 @@@ TerminateChildren(int signal
  {
        SignalChildren(signal);
        if (StartupPID != 0)
+       {
                signal_child(StartupPID, signal);
+               if (signal == SIGQUIT || signal == SIGKILL)
+                       StartupStatus = STARTUP_SIGNALED;
+       }
 +#ifdef PGXC /* PGXC_COORD */
 +      if (PgPoolerPID != 0)
 +              signal_child(PgPoolerPID, SIGQUIT);
 +#endif
 +#ifdef XCP
 +      if (ClusterMonPID != 0)
 +              signal_child(ClusterMonPID, signal);
 +#endif
        if (BgWriterPID != 0)
                signal_child(BgWriterPID, signal);
        if (CheckpointerPID != 0)
index 2d63e3857f47a39b6ba91007084062a6ee720d19,46cd5ba1f2ded5b68ce5246384d19f4c53ecca58..d5959d524715be000a4c85c7aac327336d568a92
@@@ -134,10 -148,11 +148,14 @@@ LogicalDecodingProcessRecord(LogicalDec
                case RM_SPGIST_ID:
                case RM_BRIN_ID:
                case RM_COMMIT_TS_ID:
 +#ifdef PGXC 
 +              case RM_BARRIER_ID:
 +#endif
                case RM_REPLORIGIN_ID:
+               case RM_GENERIC_ID:
+                       /* just deal with xid, and done */
+                       ReorderBufferProcessXid(ctx->reorder, XLogRecGetXid(record),
+                                                                       buf.origptr);
                        break;
                case RM_NEXT_ID:
                        elog(ERROR, "unexpected RM_NEXT_ID rmgr_id: %u", (RmgrIds) XLogRecGetRmid(buf.record));
index 042ca80932e3a156b1d55bdc68bdf63853be112a,4e4c8cdaeb21427c9e93526f97533f36046530ef..fd6013d6c2abdcdf1594b04e0c8174781228eab5
@@@ -23,6 -23,8 +23,7 @@@
  
  #include "access/xlog_internal.h"
  #include "access/xlogutils.h"
 -
+ #include "access/xact.h"
  
  #include "catalog/pg_type.h"
  
Simple merge
Simple merge
Simple merge
index 1bbd238cbe5807dd0d06212883cb85d4fb0e2ef8,76ade3727cd0ee8dc7364fdae21789c6a26847d0..90239e6abf75ebec458bb418af3c6d56a0020d44
@@@ -3,8 -3,7 +3,8 @@@
   * bufmgr.c
   *      buffer manager interface routines
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
Simple merge
Simple merge
index 4535a3eb82d1187e844465f398da0c8b64cfaf34,c04b17fa8ead59f8190a3fa7d880197e9cff9235..7887d82a6e269f34338e34f48afb267f854af934
@@@ -3,8 -3,7 +3,8 @@@
   * ipci.c
   *      POSTGRES inter-process communication initialization code.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  #include "storage/procsignal.h"
  #include "storage/sinvaladt.h"
  #include "storage/spin.h"
 +#ifdef XCP
 +#include "pgxc/pgxc.h"
 +#include "pgxc/squeue.h"
 +#include "pgxc/pause.h"
 +#endif
+ #include "utils/snapmgr.h"
  
 -
  shmem_startup_hook_type shmem_startup_hook = NULL;
  
  static Size total_addin_request = 0;
@@@ -146,13 -137,7 +147,14 @@@ CreateSharedMemoryAndSemaphores(bool ma
                size = add_size(size, ReplicationOriginShmemSize());
                size = add_size(size, WalSndShmemSize());
                size = add_size(size, WalRcvShmemSize());
 +#ifdef XCP
 +              if (IS_PGXC_DATANODE)
 +                      size = add_size(size, SharedQueueShmemSize());
 +              if (IS_PGXC_COORDINATOR)
 +                      size = add_size(size, ClusterLockShmemSize());
 +              size = add_size(size, ClusterMonitorShmemSize());
 +#endif
+               size = add_size(size, SnapMgrShmemSize());
                size = add_size(size, BTreeShmemSize());
                size = add_size(size, SyncScanShmemSize());
                size = add_size(size, AsyncShmemSize());
index 885a92909e0ae009a7539358f7855fae21fb47ba,e5d487dbb74cc8cd1b29ffc95e251c5a43813cc4..a66cb2468d4682c7c23e060804c4b2154aa6f4ea
   * happen, it would tie up KnownAssignedXids indefinitely, so we protect
   * ourselves by pruning the array when a valid list of running XIDs arrives.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
@@@ -215,11 -170,10 +218,14 @@@ static int KnownAssignedXidsGetAndSetXm
  static TransactionId KnownAssignedXidsGetOldestXmin(void);
  static void KnownAssignedXidsDisplay(int trace_level);
  static void KnownAssignedXidsReset(void);
+ static inline void ProcArrayEndTransactionInternal(PGPROC *proc,
+                                                               PGXACT *pgxact, TransactionId latestXid);
+ static void ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid);
  
 +#ifdef XCP
 +int   GlobalSnapshotSource;
 +#endif
 +
  /*
   * Report shared-memory space needed by CreateSharedProcArray.
   */
@@@ -452,38 -410,20 +465,30 @@@ ProcArrayEndTransaction(PGPROC *proc, T
                 * else is taking a snapshot.  See discussion in
                 * src/backend/access/transam/README.
                 */
 +#ifdef PGXC
 +              /*
 +               * Remove this assertion. We have seen this failing because a ROLLBACK
 +               * statement may get canceled by a Coordinator, leading to recursive
 +               * abort of a transaction. This must be a PostgreSQL issue, highlighted
 +               * by XC. See thread on hackers with subject "Canceling ROLLBACK
 +               * statement"
 +               */
 +#else
                Assert(TransactionIdIsValid(allPgXact[proc->pgprocno].xid));
 +#endif
  
-               LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
-               pgxact->xid = InvalidTransactionId;
-               proc->lxid = InvalidLocalTransactionId;
-               pgxact->xmin = InvalidTransactionId;
-               /* must be cleared with xid/xmin: */
-               pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
-               pgxact->delayChkpt = false;             /* be sure this is cleared in abort */
-               proc->recoveryConflictPending = false;
-               /* Clear the subtransaction-XID cache too while holding the lock */
-               pgxact->nxids = 0;
-               pgxact->overflowed = false;
-               /* Also advance global latestCompletedXid while holding the lock */
-               if (TransactionIdPrecedes(ShmemVariableCache->latestCompletedXid,
-                                                                 latestXid))
-                       ShmemVariableCache->latestCompletedXid = latestXid;
-               LWLockRelease(ProcArrayLock);
+               /*
+                * If we can immediately acquire ProcArrayLock, we clear our own XID
+                * and release the lock.  If not, use group XID clearing to improve
+                * efficiency.
+                */
+               if (LWLockConditionalAcquire(ProcArrayLock, LW_EXCLUSIVE))
+               {
+                       ProcArrayEndTransactionInternal(proc, pgxact, latestXid);
+                       LWLockRelease(ProcArrayLock);
+               }
+               else
+                       ProcArrayGroupClearXid(proc, latestXid);
        }
        else
        {
index 82f1ae837a592e43004c3b25bd197d5f635bddc5,a3d6ac5318a520f40c016f8a64aed2e0ba4c9a25..0d2d1b08435ac73acc5c9e6a17ae1a532a478581
@@@ -4,8 -4,7 +4,8 @@@
   *      Routines for interprocess signalling
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
Simple merge
index a5f916ca01a77fc53615c9147974e1419d4e4d97,dba3809e7406bfe4954766c71ccb636a415c03eb..1c5fa1e1e6d6c4748e458cab71e54f93598bb856
@@@ -3,8 -3,7 +3,8 @@@
   * lock.c
   *      POSTGRES primary lock mechanism
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -1349,91 -1354,49 +1404,121 @@@ LockCheckConflicts(LockMethod lockMetho
        }
  
        /*
-        * now check again for conflicts.  'otherLocks' describes the types of
-        * locks held by other processes.  If one of these conflicts with the kind
-        * of lock that I want, there is a conflict and I have to sleep.
+        * Locks held in conflicting modes by members of our own lock group are
+        * not real conflicts; we can subtract those out and see if we still have
+        * a conflict.  This is O(N) in the number of processes holding or
+        * awaiting locks on this object.  We could improve that by making the
+        * shared memory state more complex (and larger) but it doesn't seem worth
+        * it.
         */
-       if (!(lockMethodTable->conflictTab[lockmode] & otherLocks))
+       procLocks = &(lock->procLocks);
+       otherproclock = (PROCLOCK *)
+               SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
+       while (otherproclock != NULL)
        {
-               /* no conflict. OK to get the lock */
-               PROCLOCK_PRINT("LockCheckConflicts: resolved", proclock);
-               return STATUS_OK;
-       }
+               if (proclock != otherproclock &&
+                       proclock->groupLeader == otherproclock->groupLeader &&
+                       (otherproclock->holdMask & conflictMask) != 0)
+               {
+                       int                     intersectMask = otherproclock->holdMask & conflictMask;
+                       for (i = 1; i <= numLockModes; i++)
+                       {
+                               if ((intersectMask & LOCKBIT_ON(i)) != 0)
+                               {
+                                       if (conflictsRemaining[i] <= 0)
+                                               elog(PANIC, "proclocks held do not match lock");
+                                       conflictsRemaining[i]--;
+                                       totalConflictsRemaining--;
+                               }
+                       }
  
+                       if (totalConflictsRemaining == 0)
+                       {
+                               PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
+                                                          proclock);
+                               return STATUS_OK;
+                       }
+               }
+               otherproclock = (PROCLOCK *)
+                       SHMQueueNext(procLocks, &otherproclock->lockLink,
+                                                offsetof(PROCLOCK, lockLink));
+       }
  
-       PROCLOCK_PRINT("LockCheckConflicts: conflicting", proclock);
 +#ifdef XCP
 +      /*
 +       * So the lock is conflicting with locks held by some other backend.
 +       * But the backend may belong to the same distributed session. We need to
 +       * detect such cases and either allow the lock or throw error, because
 +       * waiting for the lock most probably would cause deadlock.
 +       */
 +      LWLockAcquire(ProcArrayLock, LW_SHARED);
 +      if (proc->coordPid > 0)
 +      {
 +              /* Count locks held by this process and friends */
 +              int myHolding[numLockModes + 1];
 +              SHM_QUEUE  *procLocks;
 +              PROCLOCK   *nextplock;
 +
 +              /* Initialize the counters */
 +              for (i = 1; i <= numLockModes; i++)
 +                      myHolding[i] = 0;
 +              otherLocks = 0;
 +
 +              /* Iterate over processes associated with the lock */
 +              procLocks = &(lock->procLocks);
 +
 +              nextplock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
 +                                                                                        offsetof(PROCLOCK, lockLink));
 +              while (nextplock)
 +              {
 +                      PGPROC *nextproc = nextplock->tag.myProc;
 +
 +                      if (nextproc->coordPid == proc->coordPid &&
 +                                      nextproc->coordId == proc->coordId)
 +                      {
 +                              /*
 +                              * The process belongs to the same distributed session; count its locks
 +                               */
 +                              myLocks = nextplock->holdMask;
 +                              for (i = 1; i <= numLockModes; i++)
 +                                      myHolding[i] += ((myLocks & LOCKBIT_ON(i)) ? 1 : 0);
 +                      }
 +                      /* get next proclock */
 +                      nextplock = (PROCLOCK *)
 +                                      SHMQueueNext(procLocks, &nextplock->lockLink,
 +                                                               offsetof(PROCLOCK, lockLink));
 +              }
 +
 +              /* Summarize locks held by other processes */
 +              for (i = 1; i <= numLockModes; i++)
 +              {
 +                      if (lock->granted[i] > myHolding[i])
 +                              otherLocks |= LOCKBIT_ON(i);
 +              }
 +
 +              /*
 +               * Yet another check.
 +               */
 +              if (!(lockMethodTable->conflictTab[lockmode] & otherLocks))
 +              {
 +                      LWLockRelease(ProcArrayLock);
 +                      /* no conflict. OK to get the lock */
 +                      PROCLOCK_PRINT("LockCheckConflicts: resolved as held by friend",
 +                                                 proclock);
 +#ifdef LOCK_DEBUG
 +                      elog(LOG, "Allow lock as held by the same distributed session [%u,%u] %s",
 +                               lock->tag.locktag_field1, lock->tag.locktag_field2,
 +                               lockMethodTable->lockModeNames[lockmode]);
 +#endif
 +                      return STATUS_OK;
 +              }
 +      }
 +      LWLockRelease(ProcArrayLock);
 +#endif
 +
+       /* Nope, it's a real conflict. */
+       PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
        return STATUS_FOUND;
  }
  
index 8bdf4e6eaaa2edfe4e67afb6ed71343e242bb3db,7ffa87d914b63ece475ba18cc313deef2d9aea68..2199cf43c86c8e1e329da6dd5470b9eda5336d36
   * locking should be done with the full lock manager --- which depends on
   * LWLocks to protect its shared state.
   *
-  * In addition to exclusive and shared modes, lightweight locks can be used
-  * to wait until a variable changes value.  The variable is initially set
-  * when the lock is acquired with LWLockAcquireWithVar, and can be updated
+  * In addition to exclusive and shared modes, lightweight locks can be used to
+  * wait until a variable changes value.  The variable is initially not set
+  * when the lock is acquired with LWLockAcquire, i.e. it remains set to the
+  * value it was set to when the lock was released last, and can be updated
   * without releasing the lock by calling LWLockUpdateVar.  LWLockWaitForVar
-  * waits for the variable to be updated, or until the lock is free.  The
-  * meaning of the variable is up to the caller, the lightweight lock code
-  * just assigns and compares it.
+  * waits for the variable to be updated, or until the lock is free.  When
+  * releasing the lock with LWLockReleaseClearVar() the value can be set to an
+  * appropriate value for a free lock.  The meaning of the variable is up to
+  * the caller, the lightweight lock code just assigns and compares it.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
@@@ -98,7 -91,8 +96,7 @@@
  #include "utils/hsearch.h"
  #endif
  
- /* We use the ShmemLock spinlock to protect LWLockAssign */
 -
+ /* We use the ShmemLock spinlock to protect LWLockCounter */
  extern slock_t *ShmemLock;
  
  #define LW_FLAG_HAS_WAITERS                   ((uint32) 1 << 30)
@@@ -323,62 -359,17 +363,22 @@@ get_lwlock_stats_entry(LWLock *lock
  
  
  /*
-  * Compute number of LWLocks to allocate in the main array.
+  * Compute number of LWLocks required by named tranches.  These will be
+  * allocated in the main array.
   */
  static int
- NumLWLocks(void)
+ NumLWLocksByNamedTranches(void)
  {
-       int                     numLocks;
-       /*
-        * Possibly this logic should be spread out among the affected modules,
-        * the same way that shmem space estimation is done.  But for now, there
-        * are few enough users of LWLocks that we can get away with just keeping
-        * the knowledge here.
-        */
-       /* Predefined LWLocks */
-       numLocks = NUM_FIXED_LWLOCKS;
-       /* bufmgr.c needs two for each shared buffer */
-       numLocks += 2 * NBuffers;
-       /* proc.c needs one for each backend or auxiliary process */
-       numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
-       /* clog.c needs one per CLOG buffer */
-       numLocks += CLOGShmemBuffers();
-       /* commit_ts.c needs one per CommitTs buffer */
-       numLocks += CommitTsShmemBuffers();
-       /* subtrans.c needs one per SubTrans buffer */
-       numLocks += NUM_SUBTRANS_BUFFERS;
-       /* multixact.c needs two SLRU areas */
-       numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
-       /* async.c needs one per Async buffer */
-       numLocks += NUM_ASYNC_BUFFERS;
-       /* predicate.c needs one per old serializable xid buffer */
-       numLocks += NUM_OLDSERXID_BUFFERS;
+       int                     numLocks = 0;
+       int                     i;
  
-       /* slot.c needs one for each slot */
-       numLocks += max_replication_slots;
-       /*
-        * Add any requested by loadable modules; for backwards-compatibility
-        * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if
-        * there are no explicit requests.
-        */
-       lock_addin_request_allowed = false;
-       numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);
 +#ifdef XCP
 +      /* squeue.c needs one per consumer node in each shared queue.
 +       * Max number of consumers is MaxDataNodes-1 */
 +      numLocks += NUM_SQUEUES * (MaxDataNodes-1);
 +#endif
+       for (i = 0; i < NamedLWLockTrancheRequests; i++)
+               numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
  
        return numLocks;
  }
Simple merge
index 7372fd09491bb36bda1cc11130446dc2bed58365,9a758bd91600b0839afadf3e3907e06a0902f8d5..d876625166dd60ed3c4997a98402ffffb4dc45b4
@@@ -3,8 -3,7 +3,8 @@@
   * proc.c
   *      routines to manage per-process shared memory data structure
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  #include "access/xact.h"
  #include "miscadmin.h"
  #include "postmaster/autovacuum.h"
 +#ifdef PGXC
 +#include "pgxc/pgxc.h"
 +#include "pgxc/poolmgr.h"
 +#endif
  #include "replication/slot.h"
  #include "replication/syncrep.h"
+ #include "storage/standby.h"
  #include "storage/ipc.h"
  #include "storage/lmgr.h"
  #include "storage/pmsignal.h"
index 8610bbe1383ef1a78c5dea1ad2674b692ec9e184,de45cbc4fb8e8867464b39b25600166a230a9964..f1905d2f80a1b9845a95a81b5396029afd9ac357
@@@ -4,8 -4,7 +4,8 @@@
   *      support for communication destinations
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
  #include "commands/createas.h"
  #include "commands/matview.h"
  #include "executor/functions.h"
 +#ifdef XCP
 +#include "executor/producerReceiver.h"
 +#endif
+ #include "executor/tqueue.h"
  #include "executor/tstoreReceiver.h"
  #include "libpq/libpq.h"
  #include "libpq/pqformat.h"
@@@ -131,13 -129,11 +133,16 @@@ CreateDestReceiver(CommandDest dest
                case DestSQLFunction:
                        return CreateSQLFunctionDestReceiver();
  
 +#ifdef XCP
 +              case DestProducer:
 +                      return CreateProducerDestReceiver();
 +#endif
 +
                case DestTransientRel:
                        return CreateTransientRelDestReceiver(InvalidOid);
+               case DestTupleQueue:
+                       return CreateTupleQueueDestReceiver(NULL);
        }
  
        /* should never get here */
@@@ -170,8 -166,8 +175,9 @@@ EndCommand(const char *commandTag, Comm
                case DestIntoRel:
                case DestCopyOut:
                case DestSQLFunction:
 +              case DestProducer:
                case DestTransientRel:
+               case DestTupleQueue:
                        break;
        }
  }
@@@ -213,8 -209,8 +219,9 @@@ NullCommand(CommandDest dest
                case DestIntoRel:
                case DestCopyOut:
                case DestSQLFunction:
 +              case DestProducer:
                case DestTransientRel:
+               case DestTupleQueue:
                        break;
        }
  }
@@@ -258,8 -254,8 +265,9 @@@ ReadyForQuery(CommandDest dest
                case DestIntoRel:
                case DestCopyOut:
                case DestSQLFunction:
 +              case DestProducer:
                case DestTransientRel:
+               case DestTupleQueue:
                        break;
        }
  }
index 31283cc6892defc7f4e3df27a23e5b034ae18cdc,b185c1b5eb69fba2654b65a8b80bfe74f03ff600..95cf9847721c0c453b39efc5948c8166591e3f20
@@@ -3,10 -3,8 +3,10 @@@
   * postgres.c
   *      POSTGRES C Backend Interface
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
@@@ -3432,11 -2993,20 +3436,23 @@@ ProcessInterrupts(void
                }
        }
  
+       if (IdleInTransactionSessionTimeoutPending)
+       {
+               /* Has the timeout setting changed since last we looked? */
+               if (IdleInTransactionSessionTimeout > 0)
+                       ereport(FATAL,
+                                       (errcode(ERRCODE_IDLE_IN_TRANSACTION_SESSION_TIMEOUT),
+                                        errmsg("terminating connection due to idle-in-transaction timeout")));
+               else
+                       IdleInTransactionSessionTimeoutPending = false;
+       }
        if (ParallelMessagePending)
                HandleParallelMessages();
 +
 +      if (PoolerMessagesPending())
 +              HandlePoolerMessages();
  }
  
  
@@@ -4052,27 -3580,8 +4075,28 @@@ PostgresMain(int argc, char *argv[]
        StringInfoData input_message;
        sigjmp_buf      local_sigjmp_buf;
        volatile bool send_ready_for_query = true;
+       bool            disable_idle_in_transaction_timeout = false;
  
 +#ifdef PGXC /* PGXC_DATANODE */
 +      /* Snapshot info */
 +      TransactionId                   xmin;
 +      TransactionId                   xmax;
 +      int                                             xcnt;
 +      TransactionId                   *xip;
 +      /* Timestamp info */
 +      TimestampTz             timestamp;
 +
 +      remoteConnType = REMOTE_CONN_APP;
 +#endif
 +
 +#ifdef XCP
 +      parentPGXCNode = NULL;
 +      parentPGXCNodeId = -1;
 +      parentPGXCNodeType = PGXC_NODE_DATANODE;
 +      cluster_lock_held = false;
 +      cluster_ex_lock_held = false;
 +#endif /* XCP */
 +
        /* Initialize startup process environment if necessary. */
        if (!IsUnderPostmaster)
                InitStandaloneProcess(argv[0]);
index 5575489cc1014ea511e41677dc264a765e3f6653,92d07fcb5dbd44e64fa2d2be308c0b3b3d78725a..01a24c451ecd694ce9346a93c23e2816e0a73ead
@@@ -3,8 -3,7 +3,8 @@@
   * pquery.c
   *      POSTGRES process query command code
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -1412,12 -895,8 +1411,12 @@@ PortalRunSelect(Portal portal
  {
        QueryDesc  *queryDesc;
        ScanDirection direction;
-       uint32          nprocessed;
+       uint64          nprocessed;
 +      struct          rusage start_r;
 +      struct          timeval start_t;
  
 +      if (log_executor_stats)
 +              ResetUsageCommon(&start_r, &start_t);
        /*
         * NB: queryDesc will be NULL if we are fetching from a held cursor or a
         * completed utility query; can't use it in that path.
@@@ -1699,19 -1174,21 +1696,27 @@@ PortalRunUtility(Portal portal, Node *u
                  IsA(utilityStmt, ListenStmt) ||
                  IsA(utilityStmt, NotifyStmt) ||
                  IsA(utilityStmt, UnlistenStmt) ||
 +#ifdef PGXC
 +                IsA(utilityStmt, PauseClusterStmt) ||
 +                IsA(utilityStmt, BarrierStmt) ||
 +                (IsA(utilityStmt, CheckPointStmt) && IS_PGXC_DATANODE)))
 +#else
                  IsA(utilityStmt, CheckPointStmt)))
 +#endif
        {
-               PushActiveSnapshot(GetTransactionSnapshot());
-               active_snapshot_set = true;
+               snapshot = GetTransactionSnapshot();
+               /* If told to, register the snapshot we're using and save in portal */
+               if (setHoldSnapshot)
+               {
+                       snapshot = RegisterSnapshot(snapshot);
+                       portal->holdSnapshot = snapshot;
+               }
+               PushActiveSnapshot(snapshot);
+               /* PushActiveSnapshot might have copied the snapshot */
+               snapshot = GetActiveSnapshot();
        }
        else
-               active_snapshot_set = false;
+               snapshot = NULL;
  
        ProcessUtility(utilityStmt,
                                   portal->sourceText,
@@@ -2236,357 -1720,4 +2270,356 @@@ DoPortalRewind(Portal portal
        portal->atStart = true;
        portal->atEnd = false;
        portal->portalPos = 0;
-       portal->posOverflow = false;
  }
 +
 +#ifdef XCP
 +/*
 + * Execute the specified portal's query and distribute tuples to consumers.
 + * Returs 1 if portal should keep producing, 0 if all consumers have enough
 + * rows in the buffers to pause producing temporarily, -1 if the query is
 + * completed.
 + */
 +int
 +AdvanceProducingPortal(Portal portal, bool can_wait)
 +{
 +      Portal          saveActivePortal;
 +      ResourceOwner saveResourceOwner;
 +      MemoryContext savePortalContext;
 +      MemoryContext oldContext;
 +      QueryDesc  *queryDesc;
 +      SharedQueue squeue;
 +      DestReceiver *treceiver;
 +      int                     result;
 +
 +      queryDesc = PortalGetQueryDesc(portal);
 +      squeue = queryDesc->squeue;
 +
 +      Assert(queryDesc);
 +      /* Make sure the portal is producing */
 +      Assert(squeue && queryDesc->myindex == -1);
 +      /* Make sure there is a proper receiver */
 +      Assert(queryDesc->dest && queryDesc->dest->mydest == DestProducer);
 +
 +      /*
 +       * Set up global portal context pointers.
 +       */
 +      saveActivePortal = ActivePortal;
 +      saveResourceOwner = CurrentResourceOwner;
 +      savePortalContext = PortalContext;
 +      PG_TRY();
 +      {
 +              ActivePortal = portal;
 +              CurrentResourceOwner = portal->resowner;
 +              PortalContext = PortalGetHeapMemory(portal);
 +
 +              oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
 +
 +              /*
 +               * This is the first pass through if the hold store is not yet
 +               * initialized; we need to set things up.
 +               */
 +              if (portal->holdStore == NULL && portal->status != PORTAL_FAILED)
 +              {
 +                      int idx;
 +                      char storename[64];
 +
 +                      PortalCreateProducerStore(portal);
 +                      treceiver = CreateDestReceiver(DestTuplestore);
 +                      SetTuplestoreDestReceiverParams(treceiver,
 +                                                                                      portal->holdStore,
 +                                                                                      portal->holdContext,
 +                                                                                      false);
 +                      SetSelfConsumerDestReceiver(queryDesc->dest, treceiver);
 +                      SetProducerTempMemory(queryDesc->dest, portal->tmpContext);
 +                      snprintf(storename, 64, "%s producer store", portal->name);
 +                      tuplestore_collect_stat(portal->holdStore, storename);
 +                      /*
 +                       * The tuplestore does not clear the EOF flag on the active read
 +                       * pointer, so with a single read pointer the store would stay in
 +                       * the EOF state once it is reached. We do not want that behavior,
 +                       * so we work around it by using a secondary read pointer.
 +                       * The primary read pointer (0) is active while we are writing to
 +                       * the tuple store; the secondary read pointer is for reading, and
 +                       * its EOF flag is cleared whenever a tuple is written to the store.
 +                       * We know the extra read pointer has index 1, so do not store it.
 +                       */
 +                      idx = tuplestore_alloc_read_pointer(portal->holdStore, 0);
 +                      Assert(idx == 1);
 +              }
 +
 +              if (queryDesc->estate && !queryDesc->estate->es_finished &&
 +                              portal->status != PORTAL_FAILED)
 +              {
 +                      /*
 +                       * If the portal's hold store has tuples available for reading and
 +                       * none of the consumer queues is empty, we skip advancing the
 +                       * portal (pause it) to prevent buffering too many rows at the
 +                       * producer. NB: a just-created portal store would not be in the
 +                       * EOF state, but in that case the consumer queues will be empty
 +                       * and will not allow an erroneous pause. After the first call to
 +                       * AdvanceProducingPortal the portal will try to read the hold
 +                       * store and the EOF flag will be set correctly.
 +                       */
 +                      tuplestore_select_read_pointer(portal->holdStore, 1);
 +                      if (!tuplestore_ateof(portal->holdStore) &&
 +                                      SharedQueueCanPause(squeue))
 +                              result = 0;
 +                      else
 +                              result = 1;
 +                      tuplestore_select_read_pointer(portal->holdStore, 0);
 +
 +                      if (result)
 +                      {
 +                              /* Execute query and dispatch tuples via dest receiver */
 +#define PRODUCE_TUPLES 100
 +                              PushActiveSnapshot(queryDesc->snapshot);
 +                              ExecutorRun(queryDesc, ForwardScanDirection, PRODUCE_TUPLES);
 +                              PopActiveSnapshot();
 +
 +                              if (queryDesc->estate->es_processed < PRODUCE_TUPLES)
 +                              {
 +                                      /*
 +                                       * Finish the executor, but we may still have some tuples
 +                                       * in the local storage.
 +                                       * We should keep trying to push them into the squeue, so
 +                                       * do not remove the portal from the list of producers.
 +                                       */
 +                                      ExecutorFinish(queryDesc);
 +                              }
 +                      }
 +              }
 +
 +              /* Try to dump local tuplestores */
 +              if ((queryDesc->estate == NULL || queryDesc->estate->es_finished) &&
 +                              ProducerReceiverPushBuffers(queryDesc->dest))
 +              {
 +                      if (can_wait && queryDesc->estate == NULL)
 +                      {
 +                              (*queryDesc->dest->rDestroy) (queryDesc->dest);
 +                              queryDesc->dest = NULL;
 +                              portal->queryDesc = NULL;
 +                              squeue = NULL;
 +
 +                              removeProducingPortal(portal);
 +                              FreeQueryDesc(queryDesc);
 +
 +                              /*
 +                               * Current context is the portal context, which is going
 +                               * to be deleted
 +                               */
 +                              MemoryContextSwitchTo(TopTransactionContext);
 +
 +                              ActivePortal = saveActivePortal;
 +                              CurrentResourceOwner = saveResourceOwner;
 +                              PortalContext = savePortalContext;
 +
 +                              if (portal->resowner)
 +                              {
 +                                      bool            isCommit = (portal->status != PORTAL_FAILED);
 +
 +                                      ResourceOwnerRelease(portal->resowner,
 +                                                                               RESOURCE_RELEASE_BEFORE_LOCKS,
 +                                                                               isCommit, false);
 +                                      ResourceOwnerRelease(portal->resowner,
 +                                                                               RESOURCE_RELEASE_LOCKS,
 +                                                                               isCommit, false);
 +                                      ResourceOwnerRelease(portal->resowner,
 +                                                                               RESOURCE_RELEASE_AFTER_LOCKS,
 +                                                                               isCommit, false);
 +                                      ResourceOwnerDelete(portal->resowner);
 +                              }
 +                              portal->resowner = NULL;
 +
 +                              /*
 +                               * Delete tuplestore if present.  We should do this even under error
 +                               * conditions; since the tuplestore would have been using cross-
 +                               * transaction storage, its temp files need to be explicitly deleted.
 +                               */
 +                              if (portal->holdStore)
 +                              {
 +                                      MemoryContext oldcontext;
 +
 +                                      oldcontext = MemoryContextSwitchTo(portal->holdContext);
 +                                      tuplestore_end(portal->holdStore);
 +                                      MemoryContextSwitchTo(oldcontext);
 +                                      portal->holdStore = NULL;
 +                              }
 +
 +                              /* delete tuplestore storage, if any */
 +                              if (portal->holdContext)
 +                                      MemoryContextDelete(portal->holdContext);
 +
 +                              /* release subsidiary storage */
 +                              MemoryContextDelete(PortalGetHeapMemory(portal));
 +
 +                              /* release portal struct (it's in PortalMemory) */
 +                              pfree(portal);
 +                      }
 +                      /* report portal is not producing */
 +                      result = -1;
 +              }
 +              else
 +              {
 +                      result = SharedQueueCanPause(queryDesc->squeue) ? 0 : 1;
 +              }
 +      }
 +      PG_CATCH();
 +      {
 +              /* Uncaught error while executing portal: mark it dead */
 +              portal->status = PORTAL_FAILED;
 +              /*
 +               * Reset the producer to allow consumers to finish, so the receiving
 +               * node will handle the error.
 +               */
 +              if (squeue)
 +                      SharedQueueReset(squeue, -1);
 +
 +              /* Restore global vars and propagate error */
 +              ActivePortal = saveActivePortal;
 +              CurrentResourceOwner = saveResourceOwner;
 +              PortalContext = savePortalContext;
 +
 +              PG_RE_THROW();
 +      }
 +      PG_END_TRY();
 +
 +      MemoryContextSwitchTo(oldContext);
 +
 +      ActivePortal = saveActivePortal;
 +      CurrentResourceOwner = saveResourceOwner;
 +      PortalContext = savePortalContext;
 +
 +      return result;
 +}
 +
 +
 +/*
 + * Iterate over the producing portals, determine which are already closed, and
 + * clean them up, waiting while consumers finish their work. Closed producers
 + * should be cleaned up and their resources released before proceeding with
 + * handling of the next request.
 + */
 +void
 +cleanupClosedProducers(void)
 +{
 +      ListCell   *lc = list_head(getProducingPortals());
 +      while (lc)
 +      {
 +              Portal p = (Portal) lfirst(lc);
 +              QueryDesc  *queryDesc = PortalGetQueryDesc(p);
 +              SharedQueue squeue = queryDesc->squeue;
 +
 +              /*
 +               * Fetch the next cell now, because the call below may remove the
 +               * current cell from the list and invalidate its next reference.
 +               */
 +              lc = lnext(lc);
 +
 +              /* When the portal is closed the executor state is not set */
 +              if (queryDesc->estate == NULL)
 +              {
 +                      /*
 +                       * Set up global portal context pointers.
 +                       */
 +                      Portal          saveActivePortal = ActivePortal;
 +                      ResourceOwner saveResourceOwner = CurrentResourceOwner;
 +                      MemoryContext savePortalContext = PortalContext;
 +
 +                      PG_TRY();
 +                      {
 +                              MemoryContext oldContext;
 +                              ActivePortal = p;
 +                              CurrentResourceOwner = p->resowner;
 +                              PortalContext = PortalGetHeapMemory(p);
 +
 +                              oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(p));
 +
 +                              (*queryDesc->dest->rDestroy) (queryDesc->dest);
 +                              queryDesc->dest = NULL;
 +                              p->queryDesc = NULL;
 +                              squeue = NULL;
 +
 +                              removeProducingPortal(p);
 +                              FreeQueryDesc(queryDesc);
 +
 +                              /*
 +                               * Current context is the portal context, which is going
 +                               * to be deleted
 +                               */
 +                              MemoryContextSwitchTo(TopTransactionContext);
 +
 +                              ActivePortal = saveActivePortal;
 +                              CurrentResourceOwner = saveResourceOwner;
 +                              PortalContext = savePortalContext;
 +
 +                              if (p->resowner)
 +                              {
 +                                      bool            isCommit = (p->status != PORTAL_FAILED);
 +
 +                                      ResourceOwnerRelease(p->resowner,
 +                                                                               RESOURCE_RELEASE_BEFORE_LOCKS,
 +                                                                               isCommit, false);
 +                                      ResourceOwnerRelease(p->resowner,
 +                                                                               RESOURCE_RELEASE_LOCKS,
 +                                                                               isCommit, false);
 +                                      ResourceOwnerRelease(p->resowner,
 +                                                                               RESOURCE_RELEASE_AFTER_LOCKS,
 +                                                                               isCommit, false);
 +                                      ResourceOwnerDelete(p->resowner);
 +                              }
 +                              p->resowner = NULL;
 +
 +                              /*
 +                               * Delete tuplestore if present.  We should do this even under error
 +                               * conditions; since the tuplestore would have been using cross-
 +                               * transaction storage, its temp files need to be explicitly deleted.
 +                               */
 +                              if (p->holdStore)
 +                              {
 +                                      MemoryContext oldcontext;
 +
 +                                      oldcontext = MemoryContextSwitchTo(p->holdContext);
 +                                      tuplestore_end(p->holdStore);
 +                                      MemoryContextSwitchTo(oldcontext);
 +                                      p->holdStore = NULL;
 +                              }
 +
 +                              /* delete tuplestore storage, if any */
 +                              if (p->holdContext)
 +                                      MemoryContextDelete(p->holdContext);
 +
 +                              /* release subsidiary storage */
 +                              MemoryContextDelete(PortalGetHeapMemory(p));
 +
 +                              /* release portal struct (it's in PortalMemory) */
 +                              pfree(p);
 +
 +                              MemoryContextSwitchTo(oldContext);
 +                      }
 +                      PG_CATCH();
 +                      {
 +                              /* Uncaught error while executing portal: mark it dead */
 +                              p->status = PORTAL_FAILED;
 +                              /*
 +                               * Reset the producer to allow consumers to finish, so the
 +                               * receiving node will handle the error.
 +                               */
 +                              if (squeue)
 +                                      SharedQueueReset(squeue, -1);
 +
 +                              /* Restore global vars and propagate error */
 +                              ActivePortal = saveActivePortal;
 +                              CurrentResourceOwner = saveResourceOwner;
 +                              PortalContext = savePortalContext;
 +
 +                              PG_RE_THROW();
 +                      }
 +                      PG_END_TRY();
 +
 +                      ActivePortal = saveActivePortal;
 +                      CurrentResourceOwner = saveResourceOwner;
 +                      PortalContext = savePortalContext;
 +              }
 +      }
 +}
 +#endif
index b73be0ab6e3a30a9b6db1aea2cc71c50c5e55cae,ac50c2a03d18629ace71e3e926dfcf757791fbf8..5c84daae722c772788ffe60d885214c3daf052ba
@@@ -5,10 -5,8 +5,10 @@@
   *      commands.  At one time acted as an interface between the Lisp and C
   *      systems.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
@@@ -1268,48 -835,21 +1270,61 @@@ standard_ProcessUtility(Node *parsetree
                                else
                                        ExecRenameStmt(stmt);
                        }
 +#ifdef PGXC
 +                      if (IS_PGXC_LOCAL_COORDINATOR)
 +                      {
 +                              RenameStmt *stmt = (RenameStmt *) parsetree;
 +                              RemoteQueryExecType exec_type;
 +                              bool is_temp = false;
 +
 +                              /* Try to use the object relation if possible */
 +                              if (stmt->relation)
 +                              {
 +                                      /*
 +                                       * When a relation is given, it is possible that the object does
 +                                       * not exist and an IF EXISTS clause is being used. So we do not
 +                                       * perform any error check here, but we block access to this
 +                                       * object on the remote nodes since it does not exist.
 +                                       */
 +                                      Oid relid = RangeVarGetRelid(stmt->relation, NoLock, true);
 +
 +                                      if (OidIsValid(relid))
 +                                              exec_type = ExecUtilityFindNodes(stmt->renameType,
 +                                                              relid,
 +                                                              &is_temp);
 +                                      else
 +                                              exec_type = EXEC_ON_NONE;
 +                              }
 +                              else
 +                              {
 +                                      exec_type = ExecUtilityFindNodes(stmt->renameType,
 +                                                      InvalidOid,
 +                                                      &is_temp);
 +                              }
 +
 +                              ExecUtilityStmtOnNodes(queryString,
 +                                              NULL,
 +                                              sentToRemote,
 +                                              false,
 +                                              exec_type,
 +                                              is_temp);
 +                      }
 +#endif
                        break;
  
+               case T_AlterObjectDependsStmt:
+                       {
+                               AlterObjectDependsStmt *stmt = (AlterObjectDependsStmt *) parsetree;
+                               if (EventTriggerSupportsObjectType(stmt->objectType))
+                                       ProcessUtilitySlow(parsetree, queryString,
+                                                                          context, params,
+                                                                          dest, completionTag);
+                               else
+                                       ExecAlterObjectDependsStmt(stmt, NULL);
+                       }
+                       break;
                case T_AlterObjectSchemaStmt:
                        {
                                AlterObjectSchemaStmt *stmt = (AlterObjectSchemaStmt *) parsetree;
index 5eba10a80555f3eb678ca8d5bde0c1733d1456ff,8fbd85014694c82cd2dbdc48ef4b59380bcf1355..8ee878e1128ec35afd0c51f66e9fc49da443f6a5
@@@ -3,8 -3,7 +3,8 @@@
   * arrayfuncs.c
   *      Support functions for arrays.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index b9f0bf6f055f818cc607c9bbf3ce0a6125ebf72f,420f383a804cf136d61aa512074b1b8e12900508..ff01b5f702efb1ba9c0549f6fa98c7e8532f2c12
@@@ -3,8 -3,7 +3,8 @@@
   * date.c
   *      implements DATE and TIME data types specified in SQL standard
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994-5, Regents of the University of California
   *
   *
index 50daac1f2b59ee85b9c6875b9c49f31509733225,0e8a82d6f4dd5ee4bf1110ee00723a5043239f78..770198fdb460d3aa501f22210a681ebe7e1700c5
@@@ -2,8 -2,7 +2,8 @@@
   * dbsize.c
   *            Database object size functions, and related inquiries
   *
-  * Copyright (c) 2002-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Copyright (c) 2002-2016, PostgreSQL Global Development Group
   *
   * IDENTIFICATION
   *      src/backend/utils/adt/dbsize.c
  #include "utils/rel.h"
  #include "utils/relfilenodemap.h"
  #include "utils/relmapper.h"
 +#include "utils/lsyscache.h"
  #include "utils/syscache.h"
 +#ifdef XCP
 +#include "catalog/pg_type.h"
 +#include "catalog/pgxc_node.h"
 +#include "executor/executor.h"
 +#include "nodes/makefuncs.h"
 +#include "pgxc/execRemote.h"
 +#include "utils/snapmgr.h"
 +#endif
 +
 +#ifdef PGXC
 +static Datum pgxc_database_size(Oid dbOid);
 +static Datum pgxc_tablespace_size(Oid tbOid);
 +static int64 pgxc_exec_sizefunc(Oid relOid, char *funcname, char *extra_arg);
  
 +/*
 + * The macro below is important when the object size functions are called
 + * for system catalog tables. For pg_catalog tables and other Coordinator-only
 + * tables, we should return the data from the Coordinator. If we don't find
 + * locator info, that means it is a Coordinator-only table.
 + */
 +#define COLLECT_FROM_DATANODES(relid) \
 +      (IS_PGXC_LOCAL_COORDINATOR && \
 +      (GetRelationLocInfo((relid)) != NULL))
 +#endif
+ /* Divide by two and round towards positive infinity. */
+ #define half_rounded(x)   (((x) + ((x) < 0 ? 0 : 1)) / 2)
  
  /* Return physical size of directory contents, or 0 if dir doesn't exist */
  static int64
@@@ -924,11 -996,7 +1083,11 @@@ pg_relation_filepath(PG_FUNCTION_ARGS
                        break;
                case RELPERSISTENCE_TEMP:
                        if (isTempOrTempToastNamespace(relform->relnamespace))
-                               backend = MyBackendId;
 +#ifdef XCP
 +                              backend = OidIsValid(MyCoordId) ? InvalidBackendId : MyBackendId;
 +#else
+                               backend = BackendIdForTempRelations();
 +#endif
                        else
                        {
                                /* Do it the hard way. */
Simple merge
Simple merge
index d2b5e1efdc99065c58995983861dfe77fb712ec8,987cfd18625008bcae84c2ce9d5bc3f1758a072f..002aa4946a6f942ba3af0850e6dc28c80d7636f0
@@@ -1757,10 -1734,10 +1736,9 @@@ jsonb_object_agg_transfn(PG_FUNCTION_AR
        }
  
        /* set up the accumulator on the first go round */
--
        if (PG_ARGISNULL(0))
        {
-               Oid         arg_type;
+               Oid                     arg_type;
  
                oldcontext = MemoryContextSwitchTo(aggcontext);
                state = palloc(sizeof(JsonbAggState));
index b4c9f46a123ecd0a6646fad412db467bcedcb419,2e55368061d4423a1f0e7b72edd242e8bdc1db3c..ea91cfcaf40e23f6fe81ac8391ceac9df7cd38af
  #include "catalog/pg_type.h"
  #include "funcapi.h"
  #include "miscadmin.h"
 +#ifdef PGXC
 +#include "pgxc/pgxc.h"
 +#include "pgxc/pgxcnode.h"
 +#include "pgxc/nodemgr.h"
 +#include "executor/spi.h"
 +#include "tcop/utility.h"
 +#endif
  #include "storage/predicate_internals.h"
+ #include "utils/array.h"
  #include "utils/builtins.h"
  
  
Simple merge
index bce559b8ce811399fd15acc1a0fe9b8f59cc3f5a,620226cea11d9e51f76d6f3a2f48179d7997e8f5..295216ec9b787c82e3a5094d0b9e5559eb364973
@@@ -3172,225 -3172,22 +3172,241 @@@ makeNumericAggState(FunctionCallInfo fc
        return state;
  }
  
 +/*
 + * numeric_agg_state_in() -
 + *
 + *    Input function for numeric_agg_state data type
 + */
 +Datum
 +numeric_agg_state_in(PG_FUNCTION_ARGS)
 +{
 +      char       *str = pstrdup(PG_GETARG_CSTRING(0));
 +      NumericAggState *state;
 +      char *token;
 +
 +      state = (NumericAggState *) palloc0(sizeof (NumericAggState));
 +      init_var(&state->sumX);
 +
 +      token = strtok(str, ":");
 +      state->calcSumX2 = (*token == 't');
 +
 +      token = strtok(NULL, ":");
 +      state->N = DatumGetInt64(DirectFunctionCall1(int8in,CStringGetDatum(token)));
 +
 +      token = strtok(NULL, ":");
 +      set_var_from_str(token, token, &state->sumX);
 +
 +      token = strtok(NULL, ":");
 +      if (state->calcSumX2)
 +      {
 +              init_var(&state->sumX2);
 +              set_var_from_str(token, token, &state->sumX2);
 +      }
 +
 +      token = strtok(NULL, ":");
 +      state->maxScale = DatumGetInt32(DirectFunctionCall1(int4in,CStringGetDatum(token)));
 +
 +      token = strtok(NULL, ":");
 +      state->maxScaleCount = DatumGetInt64(DirectFunctionCall1(int8in,CStringGetDatum(token)));
 +
 +      token = strtok(NULL, ":");
 +      state->NaNcount  = DatumGetInt64(DirectFunctionCall1(int8in,CStringGetDatum(token)));
 +
 +      pfree(str);
 +
 +      PG_RETURN_POINTER(state);
 +}
 +
 +/*
 + * numeric_agg_state_out() -
 + *
 + *    Output function for numeric_agg_state data type
 + */
 +Datum
 +numeric_agg_state_out(PG_FUNCTION_ARGS)
 +{
 +      NumericAggState *state = (NumericAggState *) PG_GETARG_POINTER(0);
 +      char *sumX_str, *sumX2_str, *N_str,
 +               *maxScale_str, *maxScaleCount_str,
 +               *NaNcount_str;
 +      char *result;
 +      int      len;
 +
 +      sumX_str = get_str_from_var(&state->sumX);
 +      if (state->calcSumX2)
 +              sumX2_str = get_str_from_var(&state->sumX2);
 +      else
 +              sumX2_str = "0";
 +
 +      N_str = DatumGetCString(DirectFunctionCall1(int8out,
 +                              Int64GetDatum(state->N)));
 +      maxScaleCount_str = DatumGetCString(DirectFunctionCall1(int8out,
 +                              Int64GetDatum(state->maxScaleCount)));
 +      NaNcount_str = DatumGetCString(DirectFunctionCall1(int8out,
 +                              Int64GetDatum(state->NaNcount)));
 +      maxScale_str = DatumGetCString(DirectFunctionCall1(int4out,
 +                              Int32GetDatum(state->maxScale)));
 +
 +      len = 1 + strlen(N_str) + strlen(sumX_str) + strlen(sumX2_str) +
 +              strlen(maxScale_str) + strlen(maxScaleCount_str) +
 +              strlen(NaNcount_str) + 7;
 +
 +      result = (char *) palloc0(len);
 +
 +      snprintf(result, len, "%c:%s:%s:%s:%s:%s:%s",
 +                      state->calcSumX2 ? 't' : 'f',
 +                      N_str, sumX_str, sumX2_str,
 +                      maxScale_str, maxScaleCount_str, NaNcount_str);
 +
 +      pfree(N_str);
 +      pfree(sumX_str);
 +      if (state->calcSumX2)
 +              pfree(sumX2_str);
 +      pfree(maxScale_str);
 +      pfree(maxScaleCount_str);
 +      pfree(NaNcount_str);
 +
 +      PG_RETURN_CSTRING(result);
 +}
 +
 +/*
 + * numeric_agg_state_recv - converts binary format to numeric_agg_state
 + */
 +Datum
 +numeric_agg_state_recv(PG_FUNCTION_ARGS)
 +{
 +      StringInfo      buf = (StringInfo) PG_GETARG_POINTER(0);
 +      NumericAggState *state;
 +      int len;
 +      int     i;
 +
 +      state = (NumericAggState *) palloc0(sizeof (NumericAggState));
 +
 +      state->calcSumX2 = pq_getmsgbyte(buf);
 +      state->N = pq_getmsgint(buf, sizeof (int64));
 +
 +      len = (uint16) pq_getmsgint(buf, sizeof(uint16));
 +      if (len < 0 || len > NUMERIC_MAX_PRECISION + NUMERIC_MAX_RESULT_SCALE)
 +              ereport(ERROR,
 +                              (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
 +                               errmsg("invalid length in external \"numeric\" value")));
 +
 +      alloc_var(&state->sumX, len);
 +
 +      state->sumX.weight = (int16) pq_getmsgint(buf, sizeof(int16));
 +      state->sumX.sign = (uint16) pq_getmsgint(buf, sizeof(uint16));
 +      if (!(state->sumX.sign == NUMERIC_POS ||
 +                state->sumX.sign == NUMERIC_NEG ||
 +                state->sumX.sign == NUMERIC_NAN))
 +              ereport(ERROR,
 +                              (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
 +                               errmsg("invalid sign in external \"numeric\" value")));
 +
 +      state->sumX.dscale = (uint16) pq_getmsgint(buf, sizeof(uint16));
 +      for (i = 0; i < len; i++)
 +      {
 +              NumericDigit d = pq_getmsgint(buf, sizeof(NumericDigit));
 +
 +              if (d < 0 || d >= NBASE)
 +                      ereport(ERROR,
 +                                      (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
 +                                       errmsg("invalid digit in external \"numeric\" value")));
 +              state->sumX.digits[i] = d;
 +      }
 +
 +      if (state->calcSumX2)
 +      {
 +              len = (uint16) pq_getmsgint(buf, sizeof(uint16));
 +              if (len < 0 || len > NUMERIC_MAX_PRECISION + NUMERIC_MAX_RESULT_SCALE)
 +                      ereport(ERROR,
 +                                      (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
 +                                       errmsg("invalid length in external \"numeric\" value")));
 +
 +              alloc_var(&state->sumX2, len);
 +
 +              state->sumX2.weight = (int16) pq_getmsgint(buf, sizeof(int16));
 +              state->sumX2.sign = (uint16) pq_getmsgint(buf, sizeof(uint16));
 +              if (!(state->sumX2.sign == NUMERIC_POS ||
 +                                      state->sumX2.sign == NUMERIC_NEG ||
 +                                      state->sumX2.sign == NUMERIC_NAN))
 +                      ereport(ERROR,
 +                                      (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
 +                                       errmsg("invalid sign in external \"numeric\" value")));
 +
 +              state->sumX2.dscale = (uint16) pq_getmsgint(buf, sizeof(uint16));
 +              for (i = 0; i < len; i++)
 +              {
 +                      NumericDigit d = pq_getmsgint(buf, sizeof(NumericDigit));
 +
 +                      if (d < 0 || d >= NBASE)
 +                              ereport(ERROR,
 +                                              (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
 +                                               errmsg("invalid digit in external \"numeric\" value")));
 +                      state->sumX2.digits[i] = d;
 +              }
 +      }
 +      state->maxScale = pq_getmsgint(buf, sizeof (int));
 +      state->maxScaleCount = pq_getmsgint(buf, sizeof (int64));
 +      state->NaNcount = pq_getmsgint(buf, sizeof (int64));
 +
 +      PG_RETURN_POINTER(state);
 +}
 +
 +/*
 + * numeric_agg_state_send - converts numeric_agg_state to binary format
 + */
 +Datum
 +numeric_agg_state_send(PG_FUNCTION_ARGS)
 +{
 +      NumericAggState *state = (NumericAggState *) PG_GETARG_POINTER(0);
 +      StringInfoData buf;
 +      int i;
 +
 +      pq_begintypsend(&buf);
 +
 +      pq_sendbyte(&buf, state->calcSumX2);
 +      pq_sendint(&buf, state->N, sizeof (int64));
 +
 +      pq_sendint(&buf, state->sumX.ndigits, sizeof(int16));
 +      pq_sendint(&buf, state->sumX.weight, sizeof(int16));
 +      pq_sendint(&buf, state->sumX.sign, sizeof(int16));
 +      pq_sendint(&buf, state->sumX.dscale, sizeof(int16));
 +      for (i = 0; i < state->sumX.ndigits; i++)
 +              pq_sendint(&buf, state->sumX.digits[i], sizeof(NumericDigit));
 +
 +      if (state->calcSumX2)
 +      {
 +              pq_sendint(&buf, state->sumX2.ndigits, sizeof(int16));
 +              pq_sendint(&buf, state->sumX2.weight, sizeof(int16));
 +              pq_sendint(&buf, state->sumX2.sign, sizeof(int16));
 +              pq_sendint(&buf, state->sumX2.dscale, sizeof(int16));
 +              for (i = 0; i < state->sumX2.ndigits; i++)
 +                      pq_sendint(&buf, state->sumX2.digits[i], sizeof(NumericDigit));
 +      }
 +
 +      pq_sendint(&buf, state->maxScale, sizeof (int));
 +      pq_sendint(&buf, state->maxScaleCount, sizeof (int64));
 +      pq_sendint(&buf, state->NaNcount, sizeof (int64));
 +
 +      PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
 +}
 +
+ /*
+  * Like makeNumericAggState(), but allocate the state in the current memory
+  * context.
+  */
+ static NumericAggState *
+ makeNumericAggStateCurrentContext(bool calcSumX2)
+ {
+       NumericAggState *state;
+       state = (NumericAggState *) palloc0(sizeof(NumericAggState));
+       state->calcSumX2 = calcSumX2;
+       state->agg_context = CurrentMemoryContext;
+       return state;
+ }
  /*
   * Accumulate a new input value for numeric aggregate functions.
   */
index 24dd73f5180ea3ad7adb3a0f11e34453b2141569,dd447cf4e862436ebadbc931602100bc819d965b..94ee7e2d037b48217c756ce4e295191620f99dc1
@@@ -11,8 -11,7 +11,8 @@@
   * we do better?)
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index 3f9d56876033626cb8c7f051febc117b0685e1e7,b4765005feb29a9d67bfcfe7d20c069b6438e54e..698d903e6e1ddf7ba89b50bcf0447631c56a2f9d
@@@ -13,8 -13,7 +13,8 @@@
   *    plan --- consider improving this someday.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   *
   * src/backend/utils/adt/ri_triggers.c
   *
@@@ -2990,7 -3014,8 +3021,7 @@@ ri_PlanCheck(const char *querystr, int 
        /* Switch to proper UID to perform check as */
        GetUserIdAndSecContext(&save_userid, &save_sec_context);
        SetUserIdAndSecContext(RelationGetForm(query_rel)->relowner,
-                                                  save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
+                                                  save_sec_context | SECURITY_LOCAL_USERID_CHANGE |
 -                                                 SECURITY_NOFORCE_RLS);
  
        /* Create the plan */
        qplan = SPI_prepare(querystr, nargs, argtypes);
Simple merge
index 6736ae4953012ae62353582005f950f4d85f5686,ec966c752ea46039ea68132d5b0c95877e0ca267..a3e47612111e3211429111b5e0685dda857387f4
@@@ -4,8 -4,7 +4,8 @@@
   *      Functions to convert stored expressions/querytrees back to
   *      source text
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  #include <unistd.h>
  #include <fcntl.h>
  
 +#ifdef PGXC
 +#include "access/reloptions.h"
 +#endif /* PGXC */
+ #include "access/amapi.h"
  #include "access/htup_details.h"
  #include "access/sysattr.h"
  #include "catalog/dependency.h"
  #include "catalog/indexing.h"
  #include "catalog/pg_aggregate.h"
+ #include "catalog/pg_am.h"
  #include "catalog/pg_authid.h"
 +#ifdef PGXC
 +#include "catalog/pg_aggregate.h"
 +#endif /* PGXC */
  #include "catalog/pg_collation.h"
  #include "catalog/pg_constraint.h"
  #include "catalog/pg_depend.h"
  #include "catalog/pg_type.h"
  #include "commands/defrem.h"
  #include "commands/tablespace.h"
+ #include "common/keywords.h"
  #include "executor/spi.h"
  #include "funcapi.h"
 +#ifdef PGXC
 +#include "nodes/execnodes.h"
 +#endif
+ #include "mb/pg_wchar.h"
  #include "miscadmin.h"
  #include "nodes/makefuncs.h"
  #include "nodes/nodeFuncs.h"
@@@ -4370,179 -4444,7 +4580,178 @@@ make_viewdef(StringInfo buf, HeapTuple 
        heap_close(ev_relation, AccessShareLock);
  }
  
 +#ifdef PGXC
 +/* ----------
 + * deparse_query                      - Parse back one query parsetree
 + *
 + * The purpose of this function is to build up the statement for a RemoteQuery.
 + * It just calls get_query_def without pretty-print flags.
 + * ----------
 + */
 +void
 +deparse_query(Query *query, StringInfo buf, List *parentnamespace,
 +                              bool finalise_aggs, bool sortgroup_colno)
 +{
 +      get_query_def(query, buf, parentnamespace, NULL, 0, 0, 0, finalise_aggs,
 +                      sortgroup_colno);
 +}
 +
 +/* code borrowed from get_insert_query_def */
 +void
 +get_query_def_from_valuesList(Query *query, StringInfo buf)
 +{
 +
 +      RangeTblEntry *select_rte = NULL;
 +      RangeTblEntry *values_rte = NULL;
 +      RangeTblEntry *rte;
 +      char       *sep;
 +      ListCell   *values_cell;
 +      ListCell   *l;
 +      List       *strippedexprs;
 +      deparse_context context;
 +      deparse_namespace dpns;
 +
 +      /*
 +       * Before we begin to examine the query, acquire locks on referenced
 +       * relations, and fix up deleted columns in JOIN RTEs.  This ensures
 +       * consistent results.  Note we assume it's OK to scribble on the passed
 +       * querytree!
 +       */
 +      AcquireRewriteLocks(query, false, false);
 +
 +      context.buf = buf;
 +      context.namespaces = NIL;
 +      context.windowClause = NIL;
 +      context.windowTList = NIL;
 +      context.varprefix = (list_length(query->rtable) != 1);
 +      context.prettyFlags = 0;
 +      context.indentLevel = 0;
 +      context.wrapColumn = 0;
 +
 +      dpns.rtable = query->rtable;
 +      dpns.ctes = query->cteList;
 +      dpns.planstate = NULL;
 +      dpns.ancestors = NIL;
 +      dpns.outer_planstate = dpns.inner_planstate = NULL;
 +
 +      /*
 +       * If it's an INSERT ... SELECT or VALUES (...), (...), ... there will be
 +       * a single RTE for the SELECT or VALUES.
 +       */
 +      foreach(l, query->rtable)
 +      {
 +              rte = (RangeTblEntry *) lfirst(l);
 +
 +              if (rte->rtekind == RTE_SUBQUERY)
 +              {
 +                      if (select_rte)
 +                              elog(ERROR, "too many subquery RTEs in INSERT");
 +                      select_rte = rte;
 +              }
 +
 +              if (rte->rtekind == RTE_VALUES)
 +              {
 +                      if (values_rte)
 +                              elog(ERROR, "too many values RTEs in INSERT");
 +                      values_rte = rte;
 +              }
 +      }
 +      if (select_rte && values_rte)
 +              elog(ERROR, "both subquery and values RTEs in INSERT");
 +
 +      /*
 +       * Start the query with INSERT INTO relname
 +       */
 +      rte = rt_fetch(query->resultRelation, query->rtable);
 +      Assert(rte->rtekind == RTE_RELATION);
 +
 +      appendStringInfo(buf, "INSERT INTO %s (",
 +                                       generate_relation_name(rte->relid, NIL));
 +
 +      /*
 +       * Add the insert-column-names list.  To handle indirection properly, we
 +       * need to look for indirection nodes in the top targetlist (if it's
 +       * INSERT ... SELECT or INSERT ... single VALUES), or in the first
 +       * expression list of the VALUES RTE (if it's INSERT ... multi VALUES). We
 +       * assume that all the expression lists will have similar indirection in
 +       * the latter case.
 +       */
 +      if (values_rte)
 +              values_cell = list_head((List *) linitial(values_rte->values_lists));
 +      else
 +              values_cell = NULL;
 +      strippedexprs = NIL;
 +      sep = "";
 +      foreach(l, query->targetList)
 +      {
 +              TargetEntry *tle = (TargetEntry *) lfirst(l);
 +
 +              elog(DEBUG1, "targetEntry type is %d", tle->expr->type);
 +              if (tle->resjunk || !IsA(tle->expr, Var))
 +                      continue;                       /* ignore junk entries */
 +
 +              appendStringInfoString(buf, sep);
 +              sep = ", ";
 +
 +              /*
 +               * Put out name of target column; look in the catalogs, not at
 +               * tle->resname, since resname will fail to track RENAME.
 +               */
 +              appendStringInfoString(buf,quote_identifier(get_relid_attribute_name(rte->relid, tle->resno)));
 +
 +              /*
 +               * Print any indirection needed (subfields or subscripts), and strip
 +               * off the top-level nodes representing the indirection assignments.
 +               */
 +              if (values_cell)
 +              {
 +                      /* we discard the stripped expression in this case */
 +                      processIndirection((Node *) lfirst(values_cell), &context, true);
 +                      values_cell = lnext(values_cell);
 +              }
 +              else
 +              {
 +                      /* we keep a list of the stripped expressions in this case */
 +                      strippedexprs = lappend(strippedexprs, processIndirection((Node *) tle->expr, &context, true));
 +              }
 +      }
 +      appendStringInfo(buf, ") ");
 +
 +      if (select_rte)
 +      {
 +              /* Add the SELECT */
 +              get_query_def(select_rte->subquery, buf, NIL, NULL,
 +                                        context.prettyFlags, context.wrapColumn,
 +                                        context.indentLevel,
 +                                        context.finalise_aggs, context.sortgroup_colno);
 +      }
 +      else if (values_rte)
 +      {
 +              /* A WITH clause is possible here */
 +              get_with_clause(query, &context);
 +              /* Add the multi-VALUES expression lists */
 +              get_values_def(values_rte->values_lists, &context);
 +      }
 +      else
 +      {
 +              /* A WITH clause is possible here */
 +              get_with_clause(query, &context);
 +              /* Add the single-VALUES expression list */
 +              appendContextKeyword(&context, "VALUES (",
 +                                                       -PRETTYINDENT_STD, PRETTYINDENT_STD, 2);
 +              get_rule_expr((Node *) strippedexprs, &context, false);
 +              appendStringInfoChar(buf, ')');
 +      }
  
 +      /* Add RETURNING if present */
 +      if (query->returningList)
 +      {
 +              appendContextKeyword(&context, " RETURNING",
 +                                                       -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
 +              get_target_list(query->returningList, &context, NULL);
 +      }
 +}
 +#endif
  /* ----------
   * get_query_def                      - Parse back one query parsetree
   *
@@@ -8644,12 -8402,33 +8948,37 @@@ get_agg_expr(Aggref *aggref, deparse_co
        StringInfo      buf = context->buf;
        Oid                     argtypes[FUNC_MAX_ARGS];
        int                     nargs;
 +#ifdef PGXC
 +      bool            added_finalfn = false;
 +#endif /* PGXC */
 +
        bool            use_variadic;
  
+       /*
+        * For a combining aggregate, we look up and deparse the corresponding
+        * partial aggregate instead.  This is necessary because our input
+        * argument list has been replaced; the new argument list always has just
+        * one element, which will point to a partial Aggref that supplies us with
+        * transition states to combine.
+        */
+       if (DO_AGGSPLIT_COMBINE(aggref->aggsplit))
+       {
+               TargetEntry *tle = linitial(aggref->args);
+               Assert(list_length(aggref->args) == 1);
+               Assert(IsA(tle, TargetEntry));
+               resolve_special_varno((Node *) tle->expr, context, original_aggref,
+                                                         get_agg_combine_expr);
+               return;
+       }
+       /*
+        * Mark as PARTIAL, if appropriate.  We look to the original aggref so as
+        * to avoid printing this when recursing from the code just above.
+        */
+       if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit))
+               appendStringInfoString(buf, "PARTIAL ");
        /* Extract the argument types as seen by the parser */
        nargs = get_aggregate_argtypes(aggref, argtypes);
  
        }
  
        appendStringInfoChar(buf, ')');
 +
 +#ifdef PGXC
 +      if (added_finalfn)
 +              appendStringInfoChar(buf, ')');
 +#endif /* PGXC */
  }
  
+ /*
+  * This is a helper function for get_agg_expr().  It's used when we deparse
+  * a combining Aggref; resolve_special_varno locates the corresponding partial
+  * Aggref and then calls this.
+  */
+ static void
+ get_agg_combine_expr(Node *node, deparse_context *context, void *private)
+ {
+       Aggref     *aggref;
+       Aggref     *original_aggref = private;
+       if (!IsA(node, Aggref))
+               elog(ERROR, "combining Aggref does not point to an Aggref");
+       aggref = (Aggref *) node;
+       get_agg_expr(aggref, context, original_aggref);
+ }
  /*
   * get_windowfunc_expr        - Parse back a WindowFunc node
   */
Simple merge
Simple merge
Simple merge
index 946b32649cbad059f3e09577088cee51e487a928,f24799251e6a0e751428ffaac3f70425e87d05b4..2095a9dfe2c14ade02c3d857c5108c8970980071
@@@ -3,8 -3,7 +3,8 @@@
   * version.c
   *     Returns the PostgreSQL version string
   *
-  * Copyright (c) 1998-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Copyright (c) 1998-2016, PostgreSQL Global Development Group
   *
   * IDENTIFICATION
   *
index 0ad4548fe15b96e2c2e5f8321d71f29fbc2cbba4,58035182298fd40510347584b9ba229f080ea049..3cbf8c73fd1c38c9242044e67bf146a1e57fac57
@@@ -85,8 -85,7 +85,8 @@@
   *    problems can be overcome cheaply.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
index 4f6a7b474e0077fc6575272780b91ead76cf0f15,13ae6add0363bdb7a7c1cddba97b2b46dbec79a0..d675081ed5bde06095f6d2c8a8b7d57553c12bb8
@@@ -3,8 -3,7 +3,8 @@@
   * lsyscache.c
   *      Convenience routines for common queries in the system catalog cache.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
index 49c047e0724952e2ebe866220f44547ebf9f3540,f42a62d5000c0e2c6de311d9bd53496b468fa823..686a7b471f1ac607ac0660c6a1a2a5d09160d4e7
@@@ -38,8 -38,7 +38,8 @@@
   * be infrequent enough that more-detailed tracking is not worth the effort.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
index 476538157838dbf81a4549e490a9a4fb1aa828e2,8d2ad018bbfb261277816dece6506b5a89785822..9d3e19617603adfea6a87cbce43369f8a245f659
@@@ -3,10 -3,8 +3,10 @@@
   * relcache.c
   *      POSTGRES relation descriptor cache code
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
  #include "commands/policy.h"
  #include "commands/trigger.h"
  #include "miscadmin.h"
+ #include "nodes/nodeFuncs.h"
  #include "optimizer/clauses.h"
- #include "optimizer/planmain.h"
  #include "optimizer/prep.h"
  #include "optimizer/var.h"
 +#ifdef PGXC
 +#include "pgxc/pgxc.h"
 +#include "postmaster/autovacuum.h"
 +#endif
  #include "rewrite/rewriteDefine.h"
  #include "rewrite/rowsecurity.h"
  #include "storage/lmgr.h"
@@@ -996,13 -993,7 +999,13 @@@ RelationBuildDesc(Oid targetRelId, boo
                case RELPERSISTENCE_TEMP:
                        if (isTempOrTempToastNamespace(relation->rd_rel->relnamespace))
                        {
-                               relation->rd_backend = MyBackendId;
 +#ifdef XCP
 +                              relation->rd_backend = OidIsValid(MyCoordId) ?
 +                                                                                              MyFirstBackendId : MyBackendId;
 +#else
 +                              
+                               relation->rd_backend = BackendIdForTempRelations();
 +#endif
                                relation->rd_islocaltemp = true;
                        }
                        else
@@@ -2930,12 -2975,7 +2992,12 @@@ RelationBuildLocalRelation(const char *
                        break;
                case RELPERSISTENCE_TEMP:
                        Assert(isTempOrTempToastNamespace(relnamespace));
-                       rel->rd_backend = MyBackendId;
 +#ifdef XCP
 +                      if (OidIsValid(MyCoordId))
 +                              rel->rd_backend = MyFirstBackendId;
 +                      else
 +#endif
+                       rel->rd_backend = BackendIdForTempRelations();
                        rel->rd_islocaltemp = true;
                        break;
                default:
index cb478023fa1cef39f251935c02d5cc449523a141,65ffe844093ac3f1da977427da8d9621f451222b..1f32c421078f83efd85363034414f0c96929683c
@@@ -3,9 -3,8 +3,9 @@@
   * syscache.c
   *      System cache management routines
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   *
   * IDENTIFICATION
Simple merge
index 909e2667e6f855c468f991d80a13907906f9cfd6,78d441d19875a66c04e2772faed90e299fe8537a..c4e366192de509cb8ac0b111f4f723bdc9c26059
@@@ -326,16 -290,10 +332,16 @@@ errstart(int elevel, const char *filena
         */
  
        /* Determine whether message is enabled for server log output */
 -      output_to_server = is_log_level_output(elevel, log_min_messages);
 +      output_to_server = is_log_level_output(elevel,
 +#ifdef USE_MODULE_MSGIDS
 +                      moduleid,
 +                      fileid,
 +                      msgid,
 +#endif
 +                      log_min_messages);
  
        /* Determine whether message is enabled for client output */
-       if (whereToSendOutput == DestRemote && elevel != COMMERROR)
+       if (whereToSendOutput == DestRemote && elevel != LOG_SERVER_ONLY)
        {
                /*
                 * client_min_messages is honored only after we complete the
@@@ -3845,34 -3697,9 +3892,34 @@@ get_overridden_log_level(int moduleid, 
   * test is correct for testing whether the message should go to the client.
   */
  static bool
 -is_log_level_output(int elevel, int log_min_level)
 +is_log_level_output(int elevel,
 +#ifdef USE_MODULE_MSGIDS
 +              int moduleid,
 +              int fileid,
 +              int msgid,
 +#endif
 +              int log_min_level)
  {
-       if (elevel == LOG || elevel == COMMERROR)
 +#ifdef USE_MODULE_MSGIDS
 +      /* 
 +       * Check whether the message's compile-time log level has been overridden
 +       * at run time.
 +       *
 +       * Currently, we only support raising the log level of messages, and only
 +       * for deciding whether the message should go to the server log. A message
 +       * which would otherwise not qualify for the server log can thus be forced
 +       * to be logged.
 +       *
 +       * In the future, we may also want to go the other way round, i.e. suppress
 +       * a log message or change the severity of log messages. The latter may be
 +       * especially useful to turn specific ERROR messages into FATAL or PANIC so
 +       * that we can get a core dump for analysis.
 +       */
 +      elevel = get_overridden_log_level(moduleid, fileid, msgid,
 +                      elevel);
 +#endif
 +
+       if (elevel == LOG || elevel == LOG_SERVER_ONLY)
        {
                if (log_min_level == LOG || log_min_level <= ERROR)
                        return true;
index 0e1c35badca9e83d74b56b0af30baaf0642e318a,f23208353c340137700e19fbc37804136d3fdc0e..5cb9a138a544f5b7fcc74eef1d202c34f5d0fdb0
@@@ -3,8 -3,7 +3,8 @@@
   * globals.c
   *      global variable declarations
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
@@@ -69,18 -69,10 +70,20 @@@ char               postgres_exec_path[MAXPGPATH];          /
  /* note: currently this is not valid in backend processes */
  #endif
  
 +#ifdef XCP
 +Oid                   MyCoordId = InvalidOid;
 +char          MyCoordName[NAMEDATALEN];
 +
 +int           MyCoordPid = 0;
 +LocalTransactionId    MyCoordLxid = 0;
 +
 +BackendId     MyFirstBackendId = InvalidBackendId;
 +#endif
 +
  BackendId     MyBackendId = InvalidBackendId;
  
+ BackendId     ParallelMasterBackendId = InvalidBackendId;
  Oid                   MyDatabaseId = InvalidOid;
  
  Oid                   MyDatabaseTableSpace = InvalidOid;
index c1b008dd3183c9413b79dd73cf77836bac9f87ec,d4625a6238f8d284998a536834050516fc0950c0..1f59d7acf8bedb294c820a8e8778d895afb1d1b9
@@@ -3,8 -3,7 +3,8 @@@
   * miscinit.c
   *      miscellaneous initialization support stuff
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
  #include <utime.h>
  #endif
  
 +#ifdef XCP
 +#include "catalog/namespace.h"
 +#endif
  #include "access/htup_details.h"
  #include "catalog/pg_authid.h"
+ #include "libpq/libpq.h"
  #include "mb/pg_wchar.h"
  #include "miscadmin.h"
 +#ifdef XCP
 +#include "pgxc/execRemote.h"
 +#endif
  #include "postmaster/autovacuum.h"
  #include "postmaster/postmaster.h"
  #include "storage/fd.h"
@@@ -1135,15 -1051,13 +1180,19 @@@ CreateLockFile(const char *filename, bo
        if (lock_files == NIL)
                on_proc_exit(UnlinkLockFiles, 0);
  
-       lock_files = lappend(lock_files, pstrdup(filename));
+       /*
+        * Use lcons so that the lock files are unlinked in reverse order of
+        * creation; this is critical!
+        */
+       lock_files = lcons(pstrdup(filename), lock_files);
  }
  
 +void
 +ForgetLockFiles()
 +{
 +      lock_files = NIL;
 +}
 +
  /*
   * Create the data directory lockfile.
   *
index 4803cea2c476d8f7c39900fccf1926faab56c2d3,d17197267ef76e2c82745280e097057c77b16751..2355321549e8b96b37ee7dae9c372cc40a07cb8e
@@@ -3,8 -3,7 +3,8 @@@
   * postinit.c
   *      postgres initialization utilities
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *
index cd548df8f6811764d4cce9649f29eef5c9a6322f,9c93df0f0a49efbecee3e7dc8cc60dd73cd9af46..be7adb2ac540b9281061a90b66df9e4a75926a77
@@@ -6,9 -6,7 +6,8 @@@
   * See src/backend/utils/misc/README for more information.
   *
   *
-  * Copyright (c) 2000-2012, PostgreSQL Global Development Group
-  * Copyright (c) 2000-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Copyright (c) 2000-2016, PostgreSQL Global Development Group
   * Written by Peter Eisentraut <[email protected]>.
   *
   * IDENTIFICATION
@@@ -377,23 -344,9 +376,23 @@@ static const struct config_enum_entry c
        {NULL, 0, false}
  };
  
 +#ifdef PGXC
 +/*
 + * Define remote connection types for PGXC
 + */
 +static const struct config_enum_entry pgxc_conn_types[] = {
 +      {"application", REMOTE_CONN_APP, false},
 +      {"coordinator", REMOTE_CONN_COORD, false},
 +      {"datanode", REMOTE_CONN_DATANODE, false},
 +      {"gtm", REMOTE_CONN_GTM, false},
 +      {"gtmproxy", REMOTE_CONN_GTM_PROXY, false},
 +      {NULL, 0, false}
 +};
 +#endif
 +
  /*
-  * Although only "on", "off", "remote_write", and "local" are documented, we
-  * accept all the likely variants of "on" and "off".
+  * Although only "on", "off", "remote_apply", "remote_write", and "local" are
+  * documented, we accept all the likely variants of "on" and "off".
   */
  static const struct config_enum_entry synchronous_commit_options[] = {
        {"local", SYNCHRONOUS_COMMIT_LOCAL_FLUSH, false},
@@@ -426,19 -380,19 +426,31 @@@ static const struct config_enum_entry h
        {NULL, 0, false}
  };
  
 +#ifdef XCP
 +/*
 + * Set the global-snapshot source. 'gtm' is the default, but the user can
 + * choose 'coordinator' for a performance improvement at the cost of reduced
 + * consistency.
 + */
 +static const struct config_enum_entry global_snapshot_source_options[] = {
 +      {"gtm", GLOBAL_SNAPSHOT_SOURCE_GTM, true},
 +      {"coordinator", GLOBAL_SNAPSHOT_SOURCE_COORDINATOR, true},
 +      {NULL, 0, false}
 +};
 +#endif
 +
+ static const struct config_enum_entry force_parallel_mode_options[] = {
+       {"off", FORCE_PARALLEL_OFF, false},
+       {"on", FORCE_PARALLEL_ON, false},
+       {"regress", FORCE_PARALLEL_REGRESS, false},
+       {"true", FORCE_PARALLEL_ON, true},
+       {"false", FORCE_PARALLEL_OFF, true},
+       {"yes", FORCE_PARALLEL_ON, true},
+       {"no", FORCE_PARALLEL_OFF, true},
+       {"1", FORCE_PARALLEL_ON, true},
+       {"0", FORCE_PARALLEL_OFF, true},
+       {NULL, 0, false}
+ };
  /*
   * Options for enum values stored in other modules
   */
@@@ -501,9 -450,14 +513,17 @@@ int                      tcp_keepalives_idle
  int                   tcp_keepalives_interval;
  int                   tcp_keepalives_count;
  
 +#ifdef XCP
 +char     *storm_catalog_remap_string;
 +#endif
+ /*
+  * SSL renegotiation was removed in PostgreSQL 9.5, but we tolerate it
+  * being set to zero (meaning never renegotiate) for backward compatibility.
+  * This avoids breaking compatibility with clients that have never supported
+  * renegotiation and therefore always try to zero it.
+  */
+ int                   ssl_renegotiation_limit;
  /*
   * This really belongs in pg_shmem.c, but is defined here so that it doesn't
   * need to be duplicated in all the different implementations of pg_shmem.c.
@@@ -4036,19 -3796,15 +4196,28 @@@ static struct config_enum ConfigureName
                NULL, NULL, NULL
        },
  
 +#ifdef XCP
 +      {
 +              {"global_snapshot_source", PGC_USERSET, DEVELOPER_OPTIONS,
 +                      gettext_noop("Set preferred source of a snapshot."),
 +                      gettext_noop("When set to 'coordinator', a snapshot is taken at "
 +                                      "the coordinator at the risk of reduced consistency. "
 +                                      "Default is 'gtm'.")
 +              },
 +              &GlobalSnapshotSource,
 +              GLOBAL_SNAPSHOT_SOURCE_GTM, global_snapshot_source_options,
 +              NULL, NULL, NULL
 +      },
 +#endif
+       {
+               {"force_parallel_mode", PGC_USERSET, QUERY_TUNING_OTHER,
+                       gettext_noop("Forces use of parallel query facilities."),
+                       gettext_noop("If possible, run query using a parallel worker and with parallel restrictions.")
+               },
+               &force_parallel_mode,
+               FORCE_PARALLEL_OFF, force_parallel_mode_options,
+               NULL, NULL, NULL
+       },
  
        /* End-of-list marker */
        {
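
Editor's note: the new force_parallel_mode entries above follow guc.c's usual pattern, where a config_enum_entry array maps user-visible strings to enum values and hidden aliases ('true', 'yes', '1', ...) are accepted but not documented. The sketch below shows that lookup pattern in a self-contained form; the table contents mirror the diff, but the lookup function is an illustrative assumption, not the actual guc.c machinery.

/*
 * Illustrative sketch of the config_enum_entry lookup pattern: visible
 * names plus hidden aliases, terminated by a NULL entry.
 */
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>

typedef enum
{
    FORCE_PARALLEL_OFF,
    FORCE_PARALLEL_ON,
    FORCE_PARALLEL_REGRESS
} ForceParallelMode;

struct enum_entry
{
    const char *name;
    int         val;
    bool        hidden;         /* accepted but not shown in docs */
};

static const struct enum_entry force_parallel_mode_options[] = {
    {"off", FORCE_PARALLEL_OFF, false},
    {"on", FORCE_PARALLEL_ON, false},
    {"regress", FORCE_PARALLEL_REGRESS, false},
    {"true", FORCE_PARALLEL_ON, true},
    {"false", FORCE_PARALLEL_OFF, true},
    {"yes", FORCE_PARALLEL_ON, true},
    {"no", FORCE_PARALLEL_OFF, true},
    {"1", FORCE_PARALLEL_ON, true},
    {"0", FORCE_PARALLEL_OFF, true},
    {NULL, 0, false}
};

/* Case-insensitive lookup; returns true and sets *val on a match. */
static bool
lookup_enum(const struct enum_entry *tab, const char *value, int *val)
{
    for (; tab->name != NULL; tab++)
    {
        if (strcasecmp(tab->name, value) == 0)
        {
            *val = tab->val;
            return true;
        }
    }
    return false;
}

int
main(void)
{
    int mode;

    if (lookup_enum(force_parallel_mode_options, "YES", &mode))
        printf("force_parallel_mode = %d\n", mode);     /* 1 == ON */
    return 0;
}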
index 1de0a847d8363407d6ef99cb98f22653349d31b1,6d0666c44fc865a9a7e85d5dc76500dcc1bbf9de..2163979637cdaffd5fd60341de4fdd53cec32f63
  #huge_pages = try                     # on, off, or try
                                        # (change requires restart)
  #temp_buffers = 8MB                   # min 800kB
 -#max_prepared_transactions =                # zero disables the feature
 +#max_prepared_transactions = 10               # zero disables the feature
                                        # (change requires restart)
- # Note:  Increasing max_prepared_transactions costs ~600 bytes of shared memory
- # per transaction slot, plus lock space (see max_locks_per_transaction).
- # It is not advisable to set max_prepared_transactions nonzero unless you
- # actively intend to use prepared transactions.
+ # Caution: it is not advisable to set max_prepared_transactions nonzero unless
+ # you actively intend to use prepared transactions.
  #work_mem = 4MB                               # min 64kB
  #maintenance_work_mem = 64MB          # min 1MB
+ #replacement_sort_tuples = 150000     # limits use of replacement selection sort
  #autovacuum_work_mem = -1             # min 1MB, or -1 to use maintenance_work_mem
  #max_stack_depth = 2MB                        # min 100kB
  #dynamic_shared_memory_type = posix   # the default is the first option
  # - Asynchronous Behavior -
  
  #effective_io_concurrency = 1         # 1-1000; 0 disables prefetching
- #max_worker_processes = 8
+ #max_worker_processes = 8             # (change requires restart)
+ #max_parallel_workers_per_gather = 2  # taken from max_worker_processes
+ #old_snapshot_threshold = -1          # 1min-60d; -1 disables; 0 is immediate
+                                                                       # (change requires restart)
+ #backend_flush_after = 0              # 0 disables, default is 0
  
 +# - Shared queues -
 +
 +#shared_queues = 64                   # min 16   
 +#shared_queue_size = 64KB             # min 16KB
  
  #------------------------------------------------------------------------------
  # WRITE AHEAD LOG
  #cpu_tuple_cost = 0.01                        # same scale as above
  #cpu_index_tuple_cost = 0.005         # same scale as above
  #cpu_operator_cost = 0.0025           # same scale as above
 +#network_byte_cost = 0.001            # same scale as above
 +#remote_query_cost = 100.0            # same scale as above
+ #parallel_tuple_cost = 0.1            # same scale as above
+ #parallel_setup_cost = 1000.0 # same scale as above
+ #min_parallel_relation_size = 8MB
  #effective_cache_size = 4GB
  
  # - Genetic Query Optimizer -
index a0d3bd03ac3f9ba69ed65e7c65b836bd68e1855f,6b7894213c10daa5968fd54f5605189172609749..6b62f37c23e0fa4f16e4a332b472ff3c4206b918
@@@ -54,11 -51,10 +51,13 @@@ MemoryContext CurTransactionContext = N
  /* This is a transient link to the active portal's memory context: */
  MemoryContext PortalContext = NULL;
  
--static void MemoryContextCallResetCallbacks(MemoryContext context);
 +static void MemoryContextStatsInternal(MemoryContext context, int level);
 +#ifdef PGXC
 +void *allocTopCxt(size_t s);
 +#endif
+ static void MemoryContextStatsInternal(MemoryContext context, int level,
+                                                  bool print, int max_children,
+                                                  MemoryContextCounters *totals);
  
  /*
   * You should not do memory allocations within a critical section, because
index 6bacad5bf092c85951cde948bccc55bc46ae1bd0,425cae12ea1725d6829a7ac83812e0a00d1e9390..776d2ae893b129664b965d165ff8707684d17b22
@@@ -8,8 -8,7 +8,8 @@@
   * doesn't actually run the executor for them.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
  #include "miscadmin.h"
  #include "utils/builtins.h"
  #include "utils/memutils.h"
+ #include "utils/snapmgr.h"
  #include "utils/timestamp.h"
  
 +#ifdef PGXC
 +#include "pgxc/pgxc.h"
 +#include "access/hash.h"
 +#include "catalog/pg_collation.h"
 +#include "utils/formatting.h"
 +#include "utils/lsyscache.h"
 +#endif
 +
  /*
   * Estimate of the maximum number of open portals a user would have,
   * used in initially sizing the PortalHashTable in EnablePortalManager().
@@@ -588,26 -528,20 +610,40 @@@ PortalDrop(Portal portal, bool isTopCom
        /* drop cached plan reference, if any */
        PortalReleaseCachedPlan(portal);
  
 +#ifdef XCP
 +      /*
 +       * Skip the memory release if the portal is still producing, i.e. it has
 +       * tuples in local memory that it still has to push to consumers; we would
 +       * lose those tuples if we freed the memory now.
 +       * The cleanup is completed once the portal has finished producing.
 +       */
 +      if (portalIsProducing(portal))
 +              return;
 +
 +      if (portal->queryDesc)
 +      {
 +              ResourceOwner saveResourceOwner = CurrentResourceOwner;
 +              CurrentResourceOwner = portal->resowner;
 +              FreeQueryDesc(portal->queryDesc);
 +              CurrentResourceOwner = saveResourceOwner;
 +              portal->queryDesc = NULL;
 +      }
 +#endif
 +
+       /*
+        * If portal has a snapshot protecting its data, release that.  This needs
+        * a little care since the registration will be attached to the portal's
+        * resowner; if the portal failed, we will already have released the
+        * resowner (and the snapshot) during transaction abort.
+        */
+       if (portal->holdSnapshot)
+       {
+               if (portal->resowner)
+                       UnregisterSnapshotFromOwner(portal->holdSnapshot,
+                                                                               portal->resowner);
+               portal->holdSnapshot = NULL;
+       }
        /*
         * Release any resources still attached to the portal.  There are several
         * cases being covered here:
index 3bdfcbaceabf5cddf7d6de92c96078ea0aa0fd6f,07075ce06de062f2422bf040bf64ce981c687583..31c1cde4e6f02ce544a40d3ce4bd65332b22e3e7
@@@ -59,10 -114,16 +117,17 @@@ typedef struct ResourceOwnerDat
        ResourceOwner nextchild;        /* next child of same parent */
        const char *name;                       /* name (just for debugging) */
  
-       /* We have built-in support for remembering owned buffers */
-       int                     nbuffers;               /* number of owned buffer pins */
-       Buffer     *buffers;            /* dynamically allocated array */
-       int                     maxbuffers;             /* currently allocated array size */
+       /* We have built-in support for remembering: */
+       ResourceArray bufferarr;        /* owned buffers */
+       ResourceArray catrefarr;        /* catcache references */
+       ResourceArray catlistrefarr;    /* catcache-list pins */
+       ResourceArray relrefarr;        /* relcache references */
+       ResourceArray planrefarr;       /* plancache references */
+       ResourceArray tupdescarr;       /* tupdesc references */
+       ResourceArray snapshotarr;      /* snapshot references */
+       ResourceArray filearr;          /* open temporary files */
+       ResourceArray dsmarr;           /* dynamic shmem segments */
++      ResourceArray prepstmts;        /* prepared statements */
  
        /* We can remember up to MAX_RESOWNER_LOCKS references to local locks. */
        int                     nlocks;                 /* number of owned locks */
@@@ -404,24 -660,15 +671,26 @@@ ResourceOwnerReleaseInternal(ResourceOw
                }
  
                /* Ditto for temporary files */
-               while (owner->nfiles > 0)
+               while (ResourceArrayGetAny(&(owner->filearr), &foundres))
                {
+                       File            res = DatumGetFile(foundres);
                        if (isCommit)
-                               PrintFileLeakWarning(owner->files[owner->nfiles - 1]);
-                       FileClose(owner->files[owner->nfiles - 1]);
+                               PrintFileLeakWarning(res);
+                       FileClose(res);
                }
  
 +#ifdef XCP
 +              /* Ditto for prepared statements */
 +              while (owner->nstmts > 0)
 +              {
 +                      char *stmt = owner->stmts + ((owner->nstmts - 1) * CNAME_MAXLEN);
 +                      if (isCommit)
 +                              PrintPreparedStmtLeakWarning(stmt);
 +                      DropPreparedStatement(stmt, false);
 +              }
 +#endif
 +
                /* Clean up index scans too */
                ReleaseResources_hash();
        }
@@@ -472,28 -719,15 +741,16 @@@ ResourceOwnerDelete(ResourceOwner owner
        ResourceOwnerNewParent(owner, NULL);
  
        /* And free the object. */
-       if (owner->buffers)
-               pfree(owner->buffers);
-       if (owner->catrefs)
-               pfree(owner->catrefs);
-       if (owner->catlistrefs)
-               pfree(owner->catlistrefs);
-       if (owner->relrefs)
-               pfree(owner->relrefs);
-       if (owner->planrefs)
-               pfree(owner->planrefs);
-       if (owner->tupdescs)
-               pfree(owner->tupdescs);
-       if (owner->snapshots)
-               pfree(owner->snapshots);
-       if (owner->files)
-               pfree(owner->files);
-       if (owner->dsms)
-               pfree(owner->dsms);
- #ifdef XCP
-       if (owner->stmts)
-               pfree(owner->stmts);
- #endif
+       ResourceArrayFree(&(owner->bufferarr));
+       ResourceArrayFree(&(owner->catrefarr));
+       ResourceArrayFree(&(owner->catlistrefarr));
+       ResourceArrayFree(&(owner->relrefarr));
+       ResourceArrayFree(&(owner->planrefarr));
+       ResourceArrayFree(&(owner->tupdescarr));
+       ResourceArrayFree(&(owner->snapshotarr));
+       ResourceArrayFree(&(owner->filearr));
+       ResourceArrayFree(&(owner->dsmarr));
++      ResourceArrayFree(&(owner->prepstmts));
  
        pfree(owner);
  }
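
Editor's note: the resowner.c hunks above replace the per-resource (array, count, maxsize) triples with generic ResourceArray fields, and the XL side adds one more array for prepared statements. Below is a minimal sketch of the remember / get-any / free pattern those loops rely on; the growth policy and the direct pop in ResourceArrayGetAny are simplifying assumptions for illustration, whereas the real code leaves forgetting an entry to the release callback.

/*
 * Illustrative sketch of a ResourceArray-style container: remember a
 * resource, hand back any remaining entry during release, free the
 * whole array when the owner is deleted.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

typedef uintptr_t Datum;

typedef struct ResourceArray
{
    Datum      *items;
    int         nitems;
    int         maxitems;
} ResourceArray;

static void
ResourceArrayInit(ResourceArray *ra)
{
    ra->items = NULL;
    ra->nitems = 0;
    ra->maxitems = 0;
}

static void
ResourceArrayRemember(ResourceArray *ra, Datum value)
{
    if (ra->nitems >= ra->maxitems)
    {
        ra->maxitems = (ra->maxitems == 0) ? 8 : ra->maxitems * 2;
        ra->items = realloc(ra->items, ra->maxitems * sizeof(Datum));
    }
    ra->items[ra->nitems++] = value;
}

/* Pop any remaining entry; drives release loops like the one above. */
static bool
ResourceArrayGetAny(ResourceArray *ra, Datum *value)
{
    if (ra->nitems == 0)
        return false;
    *value = ra->items[--ra->nitems];
    return true;
}

static void
ResourceArrayFree(ResourceArray *ra)
{
    free(ra->items);
    ResourceArrayInit(ra);
}

int
main(void)
{
    ResourceArray files;
    Datum       d;

    ResourceArrayInit(&files);
    ResourceArrayRemember(&files, (Datum) 42);  /* e.g. a temp-file handle */
    while (ResourceArrayGetAny(&files, &d))
        printf("closing file %lu\n", (unsigned long) d);
    ResourceArrayFree(&files);
    return 0;
}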
index bc4f3274546b5d9f7675657ccc0606809c41252e,510565c339e49272d71b7ec1ef6d7a2f2210d8bc..604e2a57f402f82b16986ba629c56cc02ab9dde1
   * above.  Nonetheless, with large workMem we can have many tapes.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
@@@ -231,11 -262,9 +270,12 @@@ struct Tuplesortstat
        int64           allowedMem;             /* total memory allowed, in bytes */
        int                     maxTapes;               /* number of tapes (Knuth's T) */
        int                     tapeRange;              /* maxTapes-1 (Knuth's P) */
-       MemoryContext sortcontext;      /* memory context holding all sort data */
+       MemoryContext sortcontext;      /* memory context holding most sort data */
+       MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
        LogicalTapeSet *tapeset;        /* logtape.c object for tapes in a temp file */
 +#ifdef PGXC
 +      ResponseCombiner *combiner; /* tuple source, alternate to tapeset */
 +#endif /* PGXC */
  
        /*
         * These function pointers decouple the routines that must know what kind
        void            (*readtup) (Tuplesortstate *state, SortTuple *stup,
                                                                                int tapenum, unsigned int len);
  
 +#ifdef PGXC
 +      /*
 +       * Function to read the length of the next stored tuple.
 +       * Used as the 'len' parameter for the readtup function.
 +       */
 +      unsigned int (*getlen) (Tuplesortstate *state, int tapenum, bool eofOK);
 +#endif
 +
+       /*
+        * Function to move a caller tuple.  This is usually implemented as a
+        * memmove() shim, but function may also perform additional fix-up of
+        * caller tuple where needed.  Batch memory support requires the movement
+        * of caller tuples from one location in memory to another.
+        */
+       void            (*movetup) (void *dest, void *src, unsigned int len);
        /*
         * This array holds the tuples now in sort memory.  If we are in state
         * INITIAL, the tuples are in no particular order; if we are in state
  #define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
  #define WRITETUP(state,tape,stup)     ((*(state)->writetup) (state, tape, stup))
  #define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
- #define LACKMEM(state)                ((state)->availMem < 0)
 +#ifdef PGXC
 +#define GETLEN(state,tape,eofOK) ((*(state)->getlen) (state, tape, eofOK))
 +#endif
+ #define MOVETUP(dest,src,len) ((*(state)->movetup) (dest, src, len))
+ #define LACKMEM(state)                ((state)->availMem < 0 && !(state)->batchUsed)
  #define USEMEM(state,amt)     ((state)->availMem -= (amt))
  #define FREEMEM(state,amt)    ((state)->availMem += (amt))
  
@@@ -505,12 -582,7 +604,13 @@@ static void writetup_heap(Tuplesortstat
                          SortTuple *stup);
  static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
                         int tapenum, unsigned int len);
 +#ifdef PGXC
 +static unsigned int getlen_datanode(Tuplesortstate *state, int tapenum,
 +                              bool eofOK);
 +static void readtup_datanode(Tuplesortstate *state, SortTuple *stup,
 +                               int tapenum, unsigned int len);
 +#endif
+ static void movetup_heap(void *dest, void *src, unsigned int len);
  static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
                                   Tuplesortstate *state);
  static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
@@@ -663,9 -764,7 +792,10 @@@ tuplesort_begin_heap(TupleDesc tupDesc
        state->copytup = copytup_heap;
        state->writetup = writetup_heap;
        state->readtup = readtup_heap;
 +#ifdef PGXC
 +      state->getlen = getlen;
 +#endif
+       state->movetup = movetup_heap;
  
        state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
        state->abbrevNext = 10;
@@@ -829,9 -929,7 +960,10 @@@ tuplesort_begin_index_btree(Relation he
        state->copytup = copytup_index;
        state->writetup = writetup_index;
        state->readtup = readtup_index;
 +#ifdef PGXC
 +      state->getlen = getlen;
 +#endif
+       state->movetup = movetup_index;
        state->abbrevNext = 10;
  
        state->heapRel = heapRel;
@@@ -899,9 -997,7 +1031,10 @@@ tuplesort_begin_index_hash(Relation hea
        state->copytup = copytup_index;
        state->writetup = writetup_index;
        state->readtup = readtup_index;
 +#ifdef PGXC
 +      state->getlen = getlen;
 +#endif
+       state->movetup = movetup_index;
  
        state->heapRel = heapRel;
        state->indexRel = indexRel;
@@@ -944,9 -1040,7 +1077,10 @@@ tuplesort_begin_datum(Oid datumType, Oi
        state->copytup = copytup_datum;
        state->writetup = writetup_datum;
        state->readtup = readtup_datum;
 +#ifdef PGXC
 +      state->getlen = getlen;
 +#endif
+       state->movetup = movetup_datum;
        state->abbrevNext = 10;
  
        state->datumType = datumType;
@@@ -3189,6 -3849,43 +3991,42 @@@ markrunend(Tuplesortstate *state, int t
        LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
  }
  
 -
+ /*
+  * Get memory for tuple from within READTUP() routine.  Allocate
+  * memory and account for that, or consume from tape's batch
+  * allocation.
+  *
+  * Memory returned here in the final on-the-fly merge case is recycled
+  * from tape's batch allocation.  Otherwise, callers must pfree() or
+  * reset tuple child memory context, and account for that with a
+  * FREEMEM().  Currently, this only ever needs to happen in WRITETUP()
+  * routines.
+  */
+ static void *
+ readtup_alloc(Tuplesortstate *state, int tapenum, Size tuplen)
+ {
+       if (state->batchUsed)
+       {
+               /*
+                * No USEMEM() call, because during final on-the-fly merge accounting
+                * is based on tape-private state. ("Overflow" allocations are
+                * detected as an indication that a new round or preloading is
+                * required. Preloading marks existing contents of tape's batch buffer
+                * for reuse.)
+                */
+               return mergebatchalloc(state, tapenum, tuplen);
+       }
+       else
+       {
+               char       *ret;
+               /* Batch allocation yet to be performed */
+               ret = MemoryContextAlloc(state->tuplecontext, tuplen);
+               USEMEM(state, GetMemoryChunkSpace(ret));
+               return ret;
+       }
+ }
  /*
   * Routines specialized for HeapTuple (actually MinimalTuple) case
   */
@@@ -3380,54 -4081,12 +4222,60 @@@ readtup_heap(Tuplesortstate *state, Sor
                                                                &stup->isnull1);
  }
  
 +#ifdef PGXC
 +static unsigned int
 +getlen_datanode(Tuplesortstate *state, int tapenum, bool eofOK)
 +{
 +      ResponseCombiner *combiner = state->combiner;
 +      TupleTableSlot   *dstslot = combiner->ss.ps.ps_ResultTupleSlot;
 +      TupleTableSlot   *slot;
 +
 +      combiner->current_conn = tapenum;
 +      slot = FetchTuple(combiner);
 +      if (TupIsNull(slot))
 +      {
 +              if (eofOK)
 +                      return 0;
 +              else
 +                      elog(ERROR, "unexpected end of data");
 +      }
 +
 +      if (slot != dstslot)
 +              ExecCopySlot(dstslot, slot);
 +
 +      return 1;
 +}
 +
 +static void
 +readtup_datanode(Tuplesortstate *state, SortTuple *stup,
 +                               int tapenum, unsigned int len)
 +{
 +      TupleTableSlot *slot = state->combiner->ss.ps.ps_ResultTupleSlot;
 +      MinimalTuple tuple;
 +      HeapTupleData htup;
 +
 +      Assert(!TupIsNull(slot));
 +
 +      /* copy the tuple into sort storage */
 +      tuple = ExecCopySlotMinimalTuple(slot);
 +      stup->tuple = (void *) tuple;
 +      USEMEM(state, GetMemoryChunkSpace(tuple));
 +      /* set up first-column key value */
 +      htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
 +      htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
 +      stup->datum1 = heap_getattr(&htup,
 +                                                              state->sortKeys[0].ssup_attno,
 +                                                              state->tupDesc,
 +                                                              &stup->isnull1);
 +}
 +#endif /* PGXC */
 +
+ static void
+ movetup_heap(void *dest, void *src, unsigned int len)
+ {
+       memmove(dest, src, len);
+ }
  /*
   * Routines specialized for the CLUSTER case (HeapTuple data, with
   * comparisons per a btree index definition)
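
Editor's note: readtup_alloc() above chooses between tape-private batch memory (during the final on-the-fly merge) and an accounted allocation from the tuple context. The sketch below captures that "batch buffer first, counted allocation otherwise" shape in plain C; the bump-allocator layout and names are assumptions, not tuplesort.c's mergebatchalloc().

/*
 * Illustrative sketch: hand out tuple memory from a preallocated batch
 * buffer when one is active, otherwise fall back to malloc and charge
 * the usage against an availability counter.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct SortMem
{
    char       *batch;          /* preallocated batch buffer, or NULL */
    size_t      batch_used;
    size_t      batch_size;
    long        avail_mem;      /* accounted memory when not batching */
} SortMem;

static void *
readtup_alloc_sketch(SortMem *s, size_t tuplen)
{
    if (s->batch != NULL && s->batch_used + tuplen <= s->batch_size)
    {
        /* Batch case: no accounting; memory is recycled per merge round. */
        void       *p = s->batch + s->batch_used;

        s->batch_used += tuplen;
        return p;
    }
    /* Non-batch case: ordinary allocation, charged against avail_mem. */
    s->avail_mem -= (long) tuplen;
    return malloc(tuplen);
}

int
main(void)
{
    SortMem     s = {NULL, 0, 0, 1024 * 1024};
    char       *t = readtup_alloc_sketch(&s, 64);

    memset(t, 0, 64);
    printf("avail_mem after alloc: %ld\n", s.avail_mem);
    free(t);
    return 0;
}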
index 09080bb938dd5270b5495d829da43e7fa01e9185,1347fc4520a7fdafeead2912e67b0fcaa8832ab8..24b51bf28b346e94551d14455e0e0d4f30790acf
@@@ -43,8 -43,7 +43,8 @@@
   * before switching to the other state or activating a different read pointer.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
index 4facb4899e8b2dc8f9ba7d83f4b771db1cf6ab0c,f033d1d5c9cd01b25401ed4f09d2b64bec6e20fd..6923149fab380bb1f906029e9ff62b9d65049691
@@@ -30,8 -30,7 +30,8 @@@
   * destroyed at the end of each transaction.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
index 4d6855ef872026fba3efb10a6027b91c7ec9564f,1ec9f70f0eeff3dfe47b4dea17faa32d271f5472..ff7362cb49cc921ad45fc01abea146ecd7848e2a
@@@ -31,8 -31,7 +31,8 @@@
   * for too long.)
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * IDENTIFICATION
  #include "utils/snapmgr.h"
  #include "utils/syscache.h"
  #include "utils/tqual.h"
 -
 +#ifdef PGXC
 +#include "pgxc/pgxc.h"
 +#endif
  
+ /*
+  * GUC parameters
+  */
+ int                   old_snapshot_threshold;         /* number of minutes, -1 disables */
+ /*
+  * Structure for dealing with old_snapshot_threshold implementation.
+  */
+ typedef struct OldSnapshotControlData
+ {
+       /*
+        * Variables for old snapshot handling are shared among processes and are
+        * only allowed to move forward.
+        */
+       slock_t         mutex_current;  /* protect current_timestamp */
+       int64           current_timestamp;              /* latest snapshot timestamp */
+       slock_t         mutex_latest_xmin;              /* protect latest_xmin and
+                                                                                * next_map_update */
+       TransactionId latest_xmin;      /* latest snapshot xmin */
+       int64           next_map_update;        /* latest snapshot valid up to */
+       slock_t         mutex_threshold;        /* protect threshold fields */
+       int64           threshold_timestamp;    /* earlier snapshot is old */
+       TransactionId threshold_xid;    /* earlier xid may be gone */
+       /*
+        * Keep one xid per minute for old snapshot error handling.
+        *
+        * Use a circular buffer with a head offset, a count of entries currently
+        * used, and a timestamp corresponding to the xid at the head offset.  A
+        * count_used value of zero means that there are no times stored; a
+        * count_used value of OLD_SNAPSHOT_TIME_MAP_ENTRIES means that the buffer
+        * is full and the head must be advanced to add new entries.  Use
+        * timestamps aligned to minute boundaries, since that seems less
+        * surprising than aligning based on the first usage timestamp.  The
+        * latest bucket is effectively stored within latest_xmin.  The circular
+        * buffer is updated when we get a new xmin value that doesn't fall into
+        * the same interval.
+        *
+        * It is OK if the xid for a given time slot is from earlier than
+        * calculated by adding the number of minutes corresponding to the
+        * (possibly wrapped) distance from the head offset to the time of the
+        * head entry, since that just results in the vacuuming of old tuples
+        * being slightly less aggressive.  It would not be OK for it to be off in
+        * the other direction, since it might result in vacuuming tuples that are
+        * still expected to be there.
+        *
+        * Use of an SLRU was considered but not chosen because it is more
+        * heavyweight than is needed for this, and would probably not be any less
+        * code to implement.
+        *
+        * Persistence is not needed.
+        */
+       int                     head_offset;    /* subscript of oldest tracked time */
+       int64           head_timestamp; /* time corresponding to head xid */
+       int                     count_used;             /* how many slots are in use */
+       TransactionId xid_by_minute[FLEXIBLE_ARRAY_MEMBER];
+ } OldSnapshotControlData;
+ static volatile OldSnapshotControlData *oldSnapshotControl;
  /*
   * CurrentSnapshot points to the only snapshot taken in transaction-snapshot
   * mode, and to the latest one taken in a read-committed transaction.
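
Editor's note: the OldSnapshotControlData comment above describes a minute-granularity circular buffer mapping snapshot times to the oldest xid that may already have been vacuumed away. The sketch below shows the bucket arithmetic for such a minute-aligned ring; the bucket count, the advance logic, and the omission of latest_xmin handling are simplifying assumptions, not the code added by this merge.

/*
 * Illustrative sketch of a minute-aligned circular xid-by-time map:
 * head_offset/head_timestamp/count_used mirror the fields described
 * above, but the advance logic here is deliberately simplified.
 */
#include <stdio.h>
#include <stdint.h>

#define ENTRIES 10              /* the real code sizes this from the threshold GUC */

typedef uint32_t TransactionId;

typedef struct TimeMap
{
    int         head_offset;    /* subscript of oldest tracked minute */
    int64_t     head_timestamp; /* minute-aligned time of that slot */
    int         count_used;
    TransactionId xid_by_minute[ENTRIES];
} TimeMap;

/* Record the newest xmin for the given minute-aligned timestamp. */
static void
map_advance(TimeMap *m, int64_t minute_ts, TransactionId xmin)
{
    if (m->count_used == 0)
    {
        m->head_offset = 0;
        m->head_timestamp = minute_ts;
        m->count_used = 1;
        m->xid_by_minute[0] = xmin;
        return;
    }
    while (m->head_timestamp + m->count_used - 1 < minute_ts)
    {
        if (m->count_used < ENTRIES)
        {
            int         slot = (m->head_offset + m->count_used) % ENTRIES;

            m->xid_by_minute[slot] = xmin;
            m->count_used++;
        }
        else
        {
            /* Buffer full: advance the head and reuse its slot. */
            m->xid_by_minute[m->head_offset] = xmin;
            m->head_offset = (m->head_offset + 1) % ENTRIES;
            m->head_timestamp++;
        }
    }
}

int
main(void)
{
    TimeMap     m = {0};

    map_advance(&m, 100, 500);  /* minute 100 -> xmin 500 */
    map_advance(&m, 101, 620);  /* next minute -> xmin 620 */
    printf("slots used: %d, head minute: %lld\n",
           m.count_used, (long long) m.head_timestamp);
    return 0;
}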
Simple merge
Simple merge
index 2ab3cb5386dccd581a18222118a174e3b932d093,73cb7ee683d8d8f7a4d71ca01ae04b9f97499cf9..3580c155434e4fa4b231e94746b600da708d94a2
@@@ -38,9 -38,7 +38,8 @@@
   *
   * This code is released under the terms of the PostgreSQL License.
   *
-  * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/bin/initdb/initdb.c
@@@ -193,15 -195,10 +202,14 @@@ static char *authwarning = NULL
   * (no quoting to worry about).
   */
  static const char *boot_options = "-F";
 -static const char *backend_options = "--single -F -O -j -c search_path=pg_catalog -c exit_on_error=true";
 +static const char *backend_options = "--single "
 +#ifdef PGXC
 +                                       "--localxid "
 +#endif
-                                        "-F -O -c search_path=pg_catalog -c exit_on_error=true";
++                                                                       "-F -O -j -c search_path=pg_catalog -c exit_on_error=true";
  
- static const char *subdirs[] = {
+ static const char *const subdirs[] = {
        "global",
-       "pg_xlog",
        "pg_xlog/archive_status",
        "pg_clog",
        "pg_commit_ts",
@@@ -256,24 -253,21 +264,24 @@@ static void set_null_conf(void)
  static void test_config_settings(void);
  static void setup_config(void);
  static void bootstrap_template1(void);
- static void setup_auth(void);
- static void get_set_pwd(void);
- static void setup_depend(void);
- static void setup_sysviews(void);
+ static void setup_auth(FILE *cmdfd);
+ static void get_set_pwd(FILE *cmdfd);
+ static void setup_depend(FILE *cmdfd);
+ static void setup_sysviews(FILE *cmdfd);
 +#ifdef PGXC
 +static void setup_nodeself(void);
 +#endif
- static void setup_description(void);
- static void setup_collation(void);
- static void setup_conversion(void);
- static void setup_dictionary(void);
- static void setup_privileges(void);
+ static void setup_description(FILE *cmdfd);
+ static void setup_collation(FILE *cmdfd);
+ static void setup_conversion(FILE *cmdfd);
+ static void setup_dictionary(FILE *cmdfd);
+ static void setup_privileges(FILE *cmdfd);
  static void set_info_version(void);
- static void setup_schema(void);
- static void load_plpgsql(void);
- static void vacuum_db(void);
- static void make_template0(void);
- static void make_postgres(void);
+ static void setup_schema(FILE *cmdfd);
+ static void load_plpgsql(FILE *cmdfd);
+ static void vacuum_db(FILE *cmdfd);
+ static void make_template0(FILE *cmdfd);
+ static void make_postgres(FILE *cmdfd);
  static void fsync_pgdata(void);
  static void trapsig(int signum);
  static void check_ok(void);
@@@ -1811,42 -1717,9 +1738,38 @@@ setup_sysviews(FILE *cmdfd
                free(*line);
        }
  
-       PG_CMD_CLOSE;
        free(sysviews_setup);
-       check_ok();
  }
  
 +#ifdef PGXC
 +/*
 + * Set up the Postgres-XC cluster node catalog with an entry for this node,
 + * i.e. the node currently being initialized.
 + */
 +static void
 +setup_nodeself(void)
 +{
 +      PG_CMD_DECL;
 +
 +      fputs(_("creating cluster information ... "), stdout);
 +      fflush(stdout);
 +
 +      snprintf(cmd, sizeof(cmd),
 +                       "\"%s\" %s template1 >%s",
 +                       backend_exec, backend_options,
 +                       DEVNULL);
 +
 +      PG_CMD_OPEN;
 +
 +      PG_CMD_PRINTF1("CREATE NODE %s WITH (type = 'coordinator');\n",
 +                                 nodename);
 +
 +      PG_CMD_CLOSE;
 +
 +      check_ok();
 +}
 +#endif
 +
  /*
   * load description data
   */
@@@ -1882,19 -1743,15 +1793,19 @@@ setup_description(FILE *cmdfd
        PG_CMD_PUTS("CREATE TEMP TABLE tmp_pg_shdescription ( "
                                " objoid oid, "
                                " classname name, "
-                               " description text) WITHOUT OIDS;\n");
+                               " description text) WITHOUT OIDS;\n\n");
  
-       PG_CMD_PRINTF1("COPY tmp_pg_shdescription FROM E'%s';\n",
+       PG_CMD_PRINTF1("COPY tmp_pg_shdescription FROM E'%s';\n\n",
                                   escape_quotes(shdesc_file));
  
 +#ifdef XCP
 +      PG_CMD_PUTS("INSERT INTO pg_catalog.pg_shdescription "
 +#else
        PG_CMD_PUTS("INSERT INTO pg_shdescription "
 +#endif
                                " SELECT t.objoid, c.oid, t.description "
                                "  FROM tmp_pg_shdescription t, pg_class c "
-                               "   WHERE c.relname = t.classname;\n");
+                               "   WHERE c.relname = t.classname;\n\n");
  
        /* Create default descriptions for operator implementation functions */
        PG_CMD_PUTS("WITH funcdescs AS ( "
@@@ -2192,14 -2013,129 +2067,132 @@@ setup_privileges(FILE *cmdfd
        char      **priv_lines;
        static char *privileges_setup[] = {
                "UPDATE pg_class "
-               "  SET relacl = E'{\"=r/\\\\\"$POSTGRES_SUPERUSERNAME\\\\\"\"}' "
-               "  WHERE relkind IN ('r', 'v', 'm', 'S') AND relacl IS NULL;\n",
-               "GRANT USAGE ON SCHEMA pg_catalog TO PUBLIC;\n",
-               "GRANT CREATE, USAGE ON SCHEMA public TO PUBLIC;\n",
+               "  SET relacl = (SELECT array_agg(a.acl) FROM "
+               " (SELECT E'=r/\"$POSTGRES_SUPERUSERNAME\"' as acl "
+               "  UNION SELECT unnest(pg_catalog.acldefault("
+               "    CASE WHEN relkind = 'S' THEN 's' ELSE 'r' END::\"char\",10::oid))"
+               " ) as a) "
+               "  WHERE relkind IN ('r', 'v', 'm', 'S') AND relacl IS NULL;\n\n",
+               "GRANT USAGE ON SCHEMA pg_catalog TO PUBLIC;\n\n",
+               "GRANT CREATE, USAGE ON SCHEMA public TO PUBLIC;\n\n",
 +#ifdef XCP
 +        "GRANT USAGE ON SCHEMA storm_catalog TO PUBLIC;\n",
 +#endif
-               "REVOKE ALL ON pg_largeobject FROM PUBLIC;\n",
+               "REVOKE ALL ON pg_largeobject FROM PUBLIC;\n\n",
+               "INSERT INTO pg_init_privs "
+               "  (objoid, classoid, objsubid, initprivs, privtype)"
+               "    SELECT"
+               "        oid,"
+               "        (SELECT oid FROM pg_class WHERE relname = 'pg_class'),"
+               "        0,"
+               "        relacl,"
+               "        'i'"
+               "    FROM"
+               "        pg_class"
+               "    WHERE"
+               "        relacl IS NOT NULL"
+               "        AND relkind IN ('r', 'v', 'm', 'S');",
+               "INSERT INTO pg_init_privs "
+               "  (objoid, classoid, objsubid, initprivs, privtype)"
+               "    SELECT"
+               "        pg_class.oid,"
+               "        (SELECT oid FROM pg_class WHERE relname = 'pg_class'),"
+               "        pg_attribute.attnum,"
+               "        pg_attribute.attacl,"
+               "        'i'"
+               "    FROM"
+               "        pg_class"
+               "        JOIN pg_attribute ON (pg_class.oid = pg_attribute.attrelid)"
+               "    WHERE"
+               "        pg_attribute.attacl IS NOT NULL"
+               "        AND pg_class.relkind IN ('r', 'v', 'm', 'S');",
+               "INSERT INTO pg_init_privs "
+               "  (objoid, classoid, objsubid, initprivs, privtype)"
+               "    SELECT"
+               "        oid,"
+               "        (SELECT oid FROM pg_class WHERE relname = 'pg_proc'),"
+               "        0,"
+               "        proacl,"
+               "        'i'"
+               "    FROM"
+               "        pg_proc"
+               "    WHERE"
+               "        proacl IS NOT NULL;",
+               "INSERT INTO pg_init_privs "
+               "  (objoid, classoid, objsubid, initprivs, privtype)"
+               "    SELECT"
+               "        oid,"
+               "        (SELECT oid FROM pg_class WHERE relname = 'pg_type'),"
+               "        0,"
+               "        typacl,"
+               "        'i'"
+               "    FROM"
+               "        pg_type"
+               "    WHERE"
+               "        typacl IS NOT NULL;",
+               "INSERT INTO pg_init_privs "
+               "  (objoid, classoid, objsubid, initprivs, privtype)"
+               "    SELECT"
+               "        oid,"
+               "        (SELECT oid FROM pg_class WHERE relname = 'pg_language'),"
+               "        0,"
+               "        lanacl,"
+               "        'i'"
+               "    FROM"
+               "        pg_language"
+               "    WHERE"
+               "        lanacl IS NOT NULL;",
+               "INSERT INTO pg_init_privs "
+               "  (objoid, classoid, objsubid, initprivs, privtype)"
+               "    SELECT"
+               "        oid,"
+               "        (SELECT oid FROM pg_class WHERE "
+               "                 relname = 'pg_largeobject_metadata'),"
+               "        0,"
+               "        lomacl,"
+               "        'i'"
+               "    FROM"
+               "        pg_largeobject_metadata"
+               "    WHERE"
+               "        lomacl IS NOT NULL;",
+               "INSERT INTO pg_init_privs "
+               "  (objoid, classoid, objsubid, initprivs, privtype)"
+               "    SELECT"
+               "        oid,"
+               "        (SELECT oid FROM pg_class WHERE relname = 'pg_namespace'),"
+               "        0,"
+               "        nspacl,"
+               "        'i'"
+               "    FROM"
+               "        pg_namespace"
+               "    WHERE"
+               "        nspacl IS NOT NULL;",
+               "INSERT INTO pg_init_privs "
+               "  (objoid, classoid, objsubid, initprivs, privtype)"
+               "    SELECT"
+               "        oid,"
+               "        (SELECT oid FROM pg_class WHERE "
+               "                 relname = 'pg_foreign_data_wrapper'),"
+               "        0,"
+               "        fdwacl,"
+               "        'i'"
+               "    FROM"
+               "        pg_foreign_data_wrapper"
+               "    WHERE"
+               "        fdwacl IS NOT NULL;",
+               "INSERT INTO pg_init_privs "
+               "  (objoid, classoid, objsubid, initprivs, privtype)"
+               "    SELECT"
+               "        oid,"
+               "        (SELECT oid FROM pg_class "
+               "                 WHERE relname = 'pg_foreign_server'),"
+               "        0,"
+               "        srvacl,"
+               "        'i'"
+               "    FROM"
+               "        pg_foreign_server"
+               "    WHERE"
+               "        srvacl IS NOT NULL;",
                NULL
        };
  
@@@ -2313,57 -2208,11 +2265,41 @@@ setup_schema(FILE *cmdfd
   * load PL/pgsql server-side language
   */
  static void
- load_plpgsql(void)
+ load_plpgsql(FILE *cmdfd)
  {
-       PG_CMD_DECL;
-       fputs(_("loading PL/pgSQL server-side language ... "), stdout);
-       fflush(stdout);
-       snprintf(cmd, sizeof(cmd),
-                        "\"%s\" %s template1 >%s",
-                        backend_exec, backend_options,
-                        DEVNULL);
-       PG_CMD_OPEN;
-       PG_CMD_PUTS("CREATE EXTENSION plpgsql;\n");
-       PG_CMD_CLOSE;
-       check_ok();
+       PG_CMD_PUTS("CREATE EXTENSION plpgsql;\n\n");
  }
  
 +#ifdef PGXC
 +/*
 + * VACUUM FREEZE the given database. This is required to prevent xid
 + * wraparound issues when a node is brought up with xids out of sync with the
 + * GTM's xids.
 + */
 +static void
 +vacuumfreeze(char *dbname)
 +{
 +      PG_CMD_DECL;
 +      char msg[MAXPGPATH];
 +      snprintf(msg, sizeof(msg), "freezing database %s ... ", dbname);
 +
 +      fputs(_(msg), stdout);
 +      fflush(stdout);
 +
 +      snprintf(cmd, sizeof(cmd),
 +                       "\"%s\" %s %s >%s",
 +                       backend_exec, backend_options, dbname,
 +                       DEVNULL);
 +
 +      PG_CMD_OPEN;
 +
 +      PG_CMD_PUTS("VACUUM FREEZE;\n");
 +
 +      PG_CMD_CLOSE;
 +
 +      check_ok();
 +}
 +#endif /* PGXC */
 +
  /*
   * clean everything up in template1
   */
@@@ -2404,14 -2236,9 +2323,14 @@@ make_template0(FILE *cmdfd
                /*
                 * We use the OID of template0 to determine lastsysoid
                 */
 +#ifdef XCP
 +              "UPDATE pg_catalog.pg_database SET datlastsysoid = "
 +              "    (SELECT oid FROM pg_catalog.pg_database "
 +#else
                "UPDATE pg_database SET datlastsysoid = "
                "    (SELECT oid FROM pg_database "
-               "    WHERE datname = 'template0');\n",
 +#endif
+               "    WHERE datname = 'template0');\n\n",
  
                /*
                 * Explicitly revoke public create-schema and create-temp-table
                /*
                 * Finally vacuum to clean up dead rows in pg_database
                 */
-               "VACUUM FULL pg_catalog.pg_database;\n",
 +#ifdef XCP
-               "VACUUM FULL pg_database;\n",
++              "VACUUM pg_catalog.pg_database;\n",
 +#else
+               "VACUUM pg_database;\n\n",
 +#endif
                NULL
        };
  
@@@ -3449,46 -3265,51 +3370,61 @@@ initialize_data_directory(void
         */
        write_version_file("base/1");
  
-       /* Create the stuff we don't need to use bootstrap mode for */
+       /*
+        * Create the stuff we don't need to use bootstrap mode for, using a
+        * backend running in simple standalone mode.
+        */
+       fputs(_("performing post-bootstrap initialization ... "), stdout);
+       fflush(stdout);
+       snprintf(cmd, sizeof(cmd),
+                        "\"%s\" %s template1 >%s",
+                        backend_exec, backend_options,
+                        DEVNULL);
+       PG_CMD_OPEN;
  
-       setup_auth();
+       setup_auth(cmdfd);
        if (pwprompt || pwfilename)
-               get_set_pwd();
+               get_set_pwd(cmdfd);
  
-       setup_depend();
+       setup_depend(cmdfd);
  
-       setup_sysviews();
+       setup_sysviews(cmdfd);
  
 +#ifdef PGXC
 +      /* Initialize catalog information about the node self */
 +      setup_nodeself();
 +#endif
+       setup_description(cmdfd);
  
-       setup_description();
-       setup_collation();
+       setup_collation(cmdfd);
  
-       setup_conversion();
+       setup_conversion(cmdfd);
  
-       setup_dictionary();
+       setup_dictionary(cmdfd);
  
-       setup_privileges();
+       setup_privileges(cmdfd);
  
-       setup_schema();
+       setup_schema(cmdfd);
  
-       load_plpgsql();
+       load_plpgsql(cmdfd);
  
-       vacuum_db();
 +#ifdef XCP
 +#ifdef NOT_USED
 +      setup_storm();
 +#endif
 +#endif
 +
+       vacuum_db(cmdfd);
  
-       make_template0();
+       make_template0(cmdfd);
  
-       make_postgres();
+       make_postgres(cmdfd);
+       PG_CMD_CLOSE;
+       check_ok();
  }
  
  
index 816bc7ef2a588c7d81f7fc9d64969f69bd0e7ce6,efc07291adea715b0f39266da723c6cd47f1851d..03d9fbee349e4765f2051fa1b1d2aa3a24cea6d4
@@@ -432,32 -455,26 +458,37 @@@ start_postmaster(void
  
        /*
         * Since there might be quotes to handle here, it is easier simply to pass
-        * everything to a shell to process them.
-        *
-        * XXX it would be better to fork and exec so that we would know the child
-        * postmaster's PID directly; then test_postmaster_connection could use
-        * the PID without having to rely on reading it back from the pidfile.
+        * everything to a shell to process them.  Use exec so that the postmaster
+        * has the same PID as the current child process.
         */
        if (log_file != NULL)
-               snprintf(cmd, MAXPGPATH, "\"%s\" %s %s%s < \"%s\" >> \"%s\" 2>&1 &",
 +#ifdef PGXC
-               snprintf(cmd, MAXPGPATH, "\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1 &",
++              snprintf(cmd, MAXPGPATH, "exec \"%s\" %s %s%s < \"%s\" >> \"%s\" 2>&1 &",
 +                              exec_path, pgxcCommand, pgdata_opt, post_opts,
 +                              DEVNULL, log_file);
 +#else
+               snprintf(cmd, MAXPGPATH, "exec \"%s\" %s%s < \"%s\" >> \"%s\" 2>&1",
                                 exec_path, pgdata_opt, post_opts,
                                 DEVNULL, log_file);
 +#endif
        else
-               snprintf(cmd, MAXPGPATH, "\"%s\" %s %s%s < \"%s\" 2>&1 &",
 +#ifdef PGXC
-               snprintf(cmd, MAXPGPATH, "\"%s\" %s%s < \"%s\" 2>&1 &",
++              snprintf(cmd, MAXPGPATH, "exec \"%s\" %s %s%s < \"%s\" 2>&1 &",
 +                              exec_path, pgxcCommand, pgdata_opt, post_opts, DEVNULL);
 +#else
+               snprintf(cmd, MAXPGPATH, "exec \"%s\" %s%s < \"%s\" 2>&1",
                                 exec_path, pgdata_opt, post_opts, DEVNULL);
 +#endif
  
-       return system(cmd);
+       (void) execl("/bin/sh", "/bin/sh", "-c", cmd, (char *) NULL);
+       /* exec failed */
+       write_stderr(_("%s: could not start server: %s\n"),
+                                progname, strerror(errno));
+       exit(1);
+       return 0;                                       /* keep dumb compilers quiet */
  #else                                                 /* WIN32 */
  
        /*
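
Editor's note: the pg_ctl hunk above replaces system(cmd) with execl("/bin/sh", "/bin/sh", "-c", cmd, NULL), with "exec" prefixed inside the command string, so the shell replaces itself with the postmaster and the child PID pg_ctl already holds is the postmaster's PID. The standalone sketch below shows that fork plus exec-through-shell pattern; the command being launched is a placeholder.

/*
 * Illustrative sketch: fork, then have the child run "exec <cmd>" via
 * /bin/sh -c, so the command replaces the shell and the PID returned by
 * fork() is the command's own PID.  The command here is a placeholder.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int
main(void)
{
    const char *cmd = "exec sleep 1";   /* placeholder for the postmaster command line */
    pid_t       pid = fork();

    if (pid < 0)
    {
        perror("fork");
        return 1;
    }
    if (pid == 0)
    {
        /* Child: the shell execs the command, keeping this PID. */
        (void) execl("/bin/sh", "/bin/sh", "-c", cmd, (char *) NULL);
        perror("execl");        /* only reached if exec failed */
        _exit(1);
    }
    printf("launched command with pid %ld\n", (long) pid);
    waitpid(pid, NULL, 0);
    return 0;
}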
index 968f5942d5f7afa3f7e021b02e9be21f56db1b36,a5c2d09e2949c6f553b80ca6fada1479fd51c252..06695f89342b0fd5ebd05d6fb72dabd85581be00
@@@ -4,8 -4,7 +4,8 @@@
   *      pg_dump is a utility for dumping out a postgres database
   *      into a script file.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   *    pg_dump will read the system catalogs in a database and dump out a
@@@ -125,12 -129,8 +130,12 @@@ char             g_comment_end[10]
  
  static const CatalogId nilCatalogId = {0, 0};
  
 +#ifdef PGXC
 +static int    include_nodes = 0;
 +#endif
 +
  static void help(const char *progname);
- static void setup_connection(Archive *AH, DumpOptions *dopt,
+ static void setup_connection(Archive *AH,
                                 const char *dumpencoding, const char *dumpsnapshot,
                                 char *use_role);
  static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
@@@ -1251,19 -1346,29 +1365,32 @@@ selectDumpableNamespace(NamespaceInfo *
         * namespaces. If specific namespaces are being dumped, dump just those
         * namespaces. Otherwise, dump all non-system namespaces.
         */
        if (table_include_oids.head != NULL)
-               nsinfo->dobj.dump = false;
+               nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
        else if (schema_include_oids.head != NULL)
-               nsinfo->dobj.dump = simple_oid_list_member(&schema_include_oids,
-                                                                                                  nsinfo->dobj.catId.oid);
+               nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
+                       simple_oid_list_member(&schema_include_oids,
+                                                                  nsinfo->dobj.catId.oid) ?
+                       DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
+       else if (fout->remoteVersion >= 90600 &&
+                        strncmp(nsinfo->dobj.name, "pg_catalog",
+                                        strlen("pg_catalog")) == 0)
+               /*
+                * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
+                * they are interesting (and not the original ACLs which were set at
+                * initdb time, see pg_init_privs).
+                */
+               nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
        else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
 +#ifdef XCP
 +                       strncmp(nsinfo->dobj.name, "storm_", 6) == 0 ||
 +#endif
                         strcmp(nsinfo->dobj.name, "information_schema") == 0)
-               nsinfo->dobj.dump = false;
+               nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
        else
-               nsinfo->dobj.dump = true;
+               nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
  
        /*
         * In any case, a namespace can be excluded by an exclusion switch
@@@ -4609,15 -5343,11 +5370,16 @@@ getTables(Archive *fout, int *numTables
                                                  "d.refobjid AS owning_tab, "
                                                  "d.refobjsubid AS owning_col, "
                                                  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
-                                                 "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
 +#ifdef PGXC
 +                                                "(SELECT pclocatortype from pgxc_class v where v.pcrelid = c.oid) AS pgxclocatortype,"
 +                                                "(SELECT pcattnum from pgxc_class v where v.pcrelid = c.oid) AS pgxcattnum,"
 +                                                "(SELECT string_agg(node_name,',') AS pgxc_node_names from pgxc_node n where n.oid in (select unnest(nodeoids) from pgxc_class v where v.pcrelid=c.oid) ) , "
 +#endif
+                                                 "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
                                                  "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
                                                  "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
-                                                 "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
+                                                 "tc.reloptions AS toast_reloptions, "
+                                                 "NULL AS changed_acl "
                                                  "FROM pg_class c "
                                                  "LEFT JOIN pg_depend d ON "
                                                  "(c.relkind = '%c' AND "
                                                  "d.refobjid AS owning_tab, "
                                                  "d.refobjsubid AS owning_col, "
                                                  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
-                                                 "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
 +#ifdef PGXC
 +                                                "(SELECT pclocatortype from pgxc_class v where v.pcrelid = c.oid) AS pgxclocatortype,"
 +                                                "(SELECT pcattnum from pgxc_class v where v.pcrelid = c.oid) AS pgxcattnum,"
 +                                                "(SELECT string_agg(node_name,',') AS pgxc_node_names from pgxc_node n where n.oid in (select unnest(nodeoids) from pgxc_class v where v.pcrelid=c.oid) ) , "
 +#endif
+                                                 "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
                                                  "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
                                                  "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
-                                                 "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
+                                                 "tc.reloptions AS toast_reloptions, "
+                                                 "NULL AS changed_acl "
                                                  "FROM pg_class c "
                                                  "LEFT JOIN pg_depend d ON "
                                                  "(c.relkind = '%c' AND "
                                                  "d.refobjid AS owning_tab, "
                                                  "d.refobjsubid AS owning_col, "
                                                  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
-                                                 "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
 +#ifdef PGXC
 +                                                "(SELECT pclocatortype from pgxc_class v where v.pcrelid = c.oid) AS pgxclocatortype,"
 +                                                "(SELECT pcattnum from pgxc_class v where v.pcrelid = c.oid) AS pgxcattnum,"
 +                                                "(SELECT string_agg(node_name,',') AS pgxc_node_names from pgxc_node n where n.oid in (select unnest(nodeoids) from pgxc_class v where v.pcrelid=c.oid) ) , "
 +#endif
+                                                 "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
                                                  "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
                                                  "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
-                                                 "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
+                                                 "tc.reloptions AS toast_reloptions, "
+                                                 "NULL AS changed_acl "
                                                  "FROM pg_class c "
                                                  "LEFT JOIN pg_depend d ON "
                                                  "(c.relkind = '%c' AND "
                                                  "d.refobjid AS owning_tab, "
                                                  "d.refobjsubid AS owning_col, "
                                                  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
-                                               "array_to_string(c.reloptions, ', ') AS reloptions, "
-                                                 "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
 +#ifdef PGXC
 +                                                "(SELECT pclocatortype from pgxc_class v where v.pcrelid = c.oid) AS pgxclocatortype,"
 +                                                "(SELECT pcattnum from pgxc_class v where v.pcrelid = c.oid) AS pgxcattnum,"
 +                                                "(SELECT string_agg(node_name,',') AS pgxc_node_names from pgxc_node n where n.oid in (select unnest(nodeoids) from pgxc_class v where v.pcrelid=c.oid) ) , "
 +#endif
+                                                 "c.reloptions AS reloptions, "
+                                                 "tc.reloptions AS toast_reloptions, "
+                                                 "NULL AS changed_acl "
                                                  "FROM pg_class c "
                                                  "LEFT JOIN pg_depend d ON "
                                                  "(c.relkind = '%c' AND "
Simple merge
index 15f6cfdaf5022df0b5ef5458a35b08d9fac2ed7c,54a9f482008489a7f3886fda8644a1624a731aa9..60c7ba5e8ed4468486752772d3f88af68baabf67
@@@ -2093,154 -2183,3 +2213,76 @@@ dumpTimestamp(const char *msg
        if (strftime(buf, sizeof(buf), PGDUMP_STRFTIME_FMT, localtime(&now)) != 0)
                fprintf(OPF, "-- %s %s\n\n", msg, buf);
  }
- /*
-  * Append the given string to the buffer, with suitable quoting for passing
-  * the string as a value, in a keyword/pair value in a libpq connection
-  * string
-  */
- static void
- doConnStrQuoting(PQExpBuffer buf, const char *str)
- {
-       const char *s;
-       bool            needquotes;
-       /*
-        * If the string consists entirely of plain ASCII characters, no need to
-        * quote it. This is quite conservative, but better safe than sorry.
-        */
-       needquotes = false;
-       for (s = str; *s; s++)
-       {
-               if (!((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') ||
-                         (*s >= '0' && *s <= '9') || *s == '_' || *s == '.'))
-               {
-                       needquotes = true;
-                       break;
-               }
-       }
-       if (needquotes)
-       {
-               appendPQExpBufferChar(buf, '\'');
-               while (*str)
-               {
-                       /* ' and \ must be escaped by to \' and \\ */
-                       if (*str == '\'' || *str == '\\')
-                               appendPQExpBufferChar(buf, '\\');
-                       appendPQExpBufferChar(buf, *str);
-                       str++;
-               }
-               appendPQExpBufferChar(buf, '\'');
-       }
-       else
-               appendPQExpBufferStr(buf, str);
- }
- /*
-  * Append the given string to the shell command being built in the buffer,
-  * with suitable shell-style quoting.
-  */
- static void
- doShellQuoting(PQExpBuffer buf, const char *str)
- {
-       const char *p;
- #ifndef WIN32
-       appendPQExpBufferChar(buf, '\'');
-       for (p = str; *p; p++)
-       {
-               if (*p == '\'')
-                       appendPQExpBufferStr(buf, "'\"'\"'");
-               else
-                       appendPQExpBufferChar(buf, *p);
-       }
-       appendPQExpBufferChar(buf, '\'');
- #else                                                 /* WIN32 */
-       appendPQExpBufferChar(buf, '"');
-       for (p = str; *p; p++)
-       {
-               if (*p == '"')
-                       appendPQExpBufferStr(buf, "\\\"");
-               else
-                       appendPQExpBufferChar(buf, *p);
-       }
-       appendPQExpBufferChar(buf, '"');
- #endif   /* WIN32 */
- }
 +
 +#ifdef PGXC
 +static void
 +dumpNodes(PGconn *conn)
 +{
 +      PQExpBuffer query;
 +      PGresult   *res;
 +      int                     num;
 +      int                     i;
 +
 +      query = createPQExpBuffer();
 +
 +      appendPQExpBuffer(query, "select 'CREATE NODE ' || node_name || '"
 +                                      " WITH (TYPE = ' || chr(39) || (case when node_type='C'"
 +                                      " then 'coordinator' else 'datanode' end) || chr(39)"
 +                                      " || ' , HOST = ' || chr(39) || node_host || chr(39)"
 +                                      " || ', PORT = ' || node_port || (case when nodeis_primary='t'"
 +                                      " then ', PRIMARY' else ' ' end) || (case when nodeis_preferred"
 +                                      " then ', PREFERRED' else ' ' end) || ');' "
 +                                      " as node_query from pg_catalog.pgxc_node order by oid");
 +
 +      res = executeQuery(conn, query->data);
 +
 +      num = PQntuples(res);
 +
 +      if (num > 0)
 +              fprintf(OPF, "--\n-- Nodes\n--\n\n");
 +
 +      for (i = 0; i < num; i++)
 +      {
 +              fprintf(OPF, "%s\n", PQgetvalue(res, i, PQfnumber(res, "node_query")));
 +      }
 +      fprintf(OPF, "\n");
 +
 +      PQclear(res);
 +      destroyPQExpBuffer(query);
 +}
 +
 +static void
 +dumpNodeGroups(PGconn *conn)
 +{
 +      PQExpBuffer query;
 +      PGresult   *res;
 +      int                     num;
 +      int                     i;
 +
 +      query = createPQExpBuffer();
 +
 +      appendPQExpBuffer(query,
 +                                              "select 'CREATE NODE GROUP ' || pgxc_group.group_name"
 +                                              " || ' WITH(' || string_agg(node_name,',') || ');'"
 +                                              " as group_query from pg_catalog.pgxc_node, pg_catalog.pgxc_group"
 +                                              " where pgxc_node.oid = any (pgxc_group.group_members)"
 +                                              " group by pgxc_group.group_name"
 +                                              " order by pgxc_group.group_name");
 +
 +      res = executeQuery(conn, query->data);
 +
 +      num = PQntuples(res);
 +
 +      if (num > 0)
 +              fprintf(OPF, "--\n-- Node groups\n--\n\n");
 +
 +      for (i = 0; i < num; i++)
 +      {
 +              fprintf(OPF, "%s\n", PQgetvalue(res, i, PQfnumber(res, "group_query")));
 +      }
 +      fprintf(OPF, "\n");
 +
 +      PQclear(res);
 +      destroyPQExpBuffer(query);
 +}
 +#endif
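For reference, dumpNodes() and dumpNodeGroups() above emit statements of roughly this shape; node and group names, hosts and ports are invented for the example:

    --
    -- Nodes
    --

    CREATE NODE coord1 WITH (TYPE = 'coordinator', HOST = 'localhost', PORT = 5432);
    CREATE NODE dn1 WITH (TYPE = 'datanode', HOST = 'localhost', PORT = 15432, PRIMARY, PREFERRED);
    CREATE NODE dn2 WITH (TYPE = 'datanode', HOST = 'localhost', PORT = 15433, PREFERRED);

    --
    -- Node groups
    --

    CREATE NODE GROUP gp1 WITH(dn1,dn2);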
Simple merge
index c4b498276cef3cc735d66ca3d10a4921478c4f88,017b9c5b345d84605d71bee748d19b6990b0a684..3272d999eff89a34d305662a26528bae30f7097c
  #include "commands/dbcommands_xlog.h"
  #include "commands/sequence.h"
  #include "commands/tablespace.h"
+ #include "replication/message.h"
+ #include "replication/origin.h"
  #include "rmgrdesc.h"
- #include "storage/standby.h"
+ #include "storage/standbydefs.h"
  #include "utils/relmapper.h"
  
 +#ifdef XCP
 +#include "pgxc/barrier.h"
 +#endif
 +
  #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup) \
        { name, desc, identify},
  
index 687becdeadfa82afc9ccfb3b64e8d1a67a0e0549,87fb006d87966de71969daa765f00af09ac5a95d..28af19e20cedaa9075209ec88f0739751f5b4d81
@@@ -291,119 -307,92 +310,132 @@@ typedef struc
        int                     type;                   /* command type (SQL_COMMAND or META_COMMAND) */
        int                     argc;                   /* number of command words */
        char       *argv[MAX_ARGS]; /* command word list */
-       int                     cols[MAX_ARGS]; /* corresponding column starting from 1 */
-       PgBenchExpr *expr;                      /* parsed expression */
+       PgBenchExpr *expr;                      /* parsed expression, if needed */
+       SimpleStats stats;                      /* time spent in this command */
  } Command;
  
- typedef struct
+ typedef struct ParsedScript
  {
+       const char *desc;                       /* script descriptor (eg, file name) */
+       int                     weight;                 /* selection weight */
+       Command   **commands;           /* NULL-terminated array of Commands */
+       StatsData       stats;                  /* total time spent in script */
+ } ParsedScript;
+ static ParsedScript sql_script[MAX_SCRIPTS];  /* SQL script files */
+ static int    num_scripts;            /* number of scripts in sql_script[] */
+ static int    num_commands = 0;       /* total number of Command structs */
+ static int64 total_weight = 0;
  
-       long            start_time;             /* when does the interval start */
-       int                     cnt;                    /* number of transactions */
-       int                     skipped;                /* number of transactions skipped under --rate
-                                                                * and --latency-limit */
-       double          min_latency;    /* min/max latencies */
-       double          max_latency;
-       double          sum_latency;    /* sum(latency), sum(latency^2) - for
-                                                                * estimates */
-       double          sum2_latency;
+ static int    debug = 0;                      /* debug flag */
  
-       double          min_lag;
-       double          max_lag;
-       double          sum_lag;                /* sum(lag) */
-       double          sum2_lag;               /* sum(lag*lag) */
- } AggVals;
+ /* Builtin test scripts */
+ typedef struct BuiltinScript
+ {
+       const char *name;                       /* very short name for -b ... */
+       const char *desc;                       /* short description */
+       const char *script;                     /* actual pgbench script */
+ } BuiltinScript;
  
- static Command **sql_files[MAX_FILES];        /* SQL script files */
- static int    num_files;                      /* number of script files */
- static int    num_commands = 0;       /* total number of Command structs */
- static int    debug = 0;                      /* debug flag */
 +
- /* default scenario */
- static char *tpc_b = {
-       "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
-       "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
-       "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
-       "\\setrandom aid 1 :naccounts\n"
-       "\\setrandom bid 1 :nbranches\n"
-       "\\setrandom tid 1 :ntellers\n"
-       "\\setrandom delta -5000 5000\n"
-       "BEGIN;\n"
-       "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n"
-       "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
-       "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;\n"
-       "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n"
-       "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
-       "END;\n"
- };
 +
+ static const BuiltinScript builtin_script[] =
+ {
+       {
+               "tpcb-like",
+               "<builtin: TPC-B (sort of)>",
+               "\\set aid random(1, " CppAsString2(naccounts) " * :scale)\n"
+               "\\set bid random(1, " CppAsString2(nbranches) " * :scale)\n"
+               "\\set tid random(1, " CppAsString2(ntellers) " * :scale)\n"
+               "\\set delta random(-5000, 5000)\n"
+               "BEGIN;\n"
+               "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n"
+               "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
+               "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;\n"
+               "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n"
+               "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
+               "END;\n"
+       },
 +#ifdef PGXC
- static char *tpc_b_bid = {
-       "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
-       "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
-       "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
-       "\\setrandom aid 1 :naccounts\n"
-       "\\setrandom bid 1 :nbranches\n"
-       "\\setrandom tid 1 :ntellers\n"
-       "\\setrandom delta -5000 5000\n"
-       "BEGIN;\n"
-       "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n"
-       "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid\n"
-       "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid AND bid = :bid;\n"
-       "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n"
-       "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
-       "END;\n"
- };
++      {
++              "tpcb-like-bid",
++              "<builtin: TPC-B (sort of), with bid>",
++              "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
++              "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
++              "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
++              "\\setrandom aid 1 :naccounts\n"
++              "\\setrandom bid 1 :nbranches\n"
++              "\\setrandom tid 1 :ntellers\n"
++              "\\setrandom delta -5000 5000\n"
++              "BEGIN;\n"
++              "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n"
++              "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid;\n"
++              "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid AND bid = :bid;\n"
++              "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n"
++              "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
++              "END;\n"
++      },
 +#endif
- /* -N case */
- static char *simple_update = {
-       "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
-       "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
-       "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
-       "\\setrandom aid 1 :naccounts\n"
-       "\\setrandom bid 1 :nbranches\n"
-       "\\setrandom tid 1 :ntellers\n"
-       "\\setrandom delta -5000 5000\n"
-       "BEGIN;\n"
-       "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n"
-       "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
-       "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
-       "END;\n"
- };
+       {
+               "simple-update",
+               "<builtin: simple update>",
+               "\\set aid random(1, " CppAsString2(naccounts) " * :scale)\n"
+               "\\set bid random(1, " CppAsString2(nbranches) " * :scale)\n"
+               "\\set tid random(1, " CppAsString2(ntellers) " * :scale)\n"
+               "\\set delta random(-5000, 5000)\n"
+               "BEGIN;\n"
+               "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n"
+               "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
+               "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
+               "END;\n"
+       },
 +#ifdef PGXC
- static char *simple_update_bid = {
-       "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
-       "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
-       "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
-       "\\setrandom aid 1 :naccounts\n"
-       "\\setrandom bid 1 :nbranches\n"
-       "\\setrandom tid 1 :ntellers\n"
-       "\\setrandom delta -5000 5000\n"
-       "BEGIN;\n"
-       "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n"
-       "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid;\n"
-       "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
-       "END;\n"
- };
++      {
++              "simple-update-bid",
++              "<builtin: simple update bid>",
++              "\\set nbranches " CppAsString2(nbranches) " * :scale\n"
++              "\\set ntellers " CppAsString2(ntellers) " * :scale\n"
++              "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
++              "\\setrandom aid 1 :naccounts\n"
++              "\\setrandom bid 1 :nbranches\n"
++              "\\setrandom tid 1 :ntellers\n"
++              "\\setrandom delta -5000 5000\n"
++              "BEGIN;\n"
++              "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n"
++              "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid;\n"
++              "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n"
++              "END;\n"
++      },
 +#endif
- /* -S case */
- static char *select_only = {
-       "\\set naccounts " CppAsString2(naccounts) " * :scale\n"
-       "\\setrandom aid 1 :naccounts\n"
-       "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
+       {
+               "select-only",
+               "<builtin: select only>",
+               "\\set aid random(1, " CppAsString2(naccounts) " * :scale)\n"
+               "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n"
+       }
  };
  
  /* Function prototypes */
- static void setalarm(int seconds);
+ static void setIntValue(PgBenchValue *pv, int64 ival);
+ static void setDoubleValue(PgBenchValue *pv, double dval);
+ static bool evaluateExpr(TState *, CState *, PgBenchExpr *, PgBenchValue *);
+ static void doLog(TState *thread, CState *st, instr_time *now,
+         StatsData *agg, bool skipped, double latency, double lag);
+ static void processXactStats(TState *thread, CState *st, instr_time *now,
+                                bool skipped, StatsData *agg);
+ static void pgbench_error(const char *fmt,...) pg_attribute_printf(1, 2);
+ static void addScript(ParsedScript script);
  static void *threadRun(void *arg);
+ static void setalarm(int seconds);
+ /* callback functions for our flex lexer */
+ static const PsqlScanCallbacks pgbench_callbacks = {
+       NULL,                                           /* don't need get_variable functionality */
+       pgbench_error
+ };
  
- static void doLog(TState *thread, CState *st, FILE *logfile, instr_time *now,
-         AggVals *agg, bool skipped);
  
  static void
  usage(void)
                   "  -C, --connect            establish new connection for each transaction\n"
                   "  -D, --define=VARNAME=VALUE\n"
          "                           define variable for use by custom script\n"
-                "  -f, --file=FILENAME      read transaction script from FILENAME\n"
 +#ifdef PGXC
 +                 "  -k                       use branch id (bid) as an additional key in queries\n"
 +#endif
                   "  -j, --jobs=NUM           number of threads (default: 1)\n"
                   "  -l, --log                write transaction times to log file\n"
-       "  -L, --latency-limit=NUM  count transactions lasting more than NUM ms\n"
-                  "                           as late.\n"
+                  "  -L, --latency-limit=NUM  count transactions lasting more than NUM ms as late\n"
                   "  -M, --protocol=simple|extended|prepared\n"
                   "                           protocol for submitting queries (default: simple)\n"
                   "  -n, --no-vacuum          do not run VACUUM before tests\n"
@@@ -2967,12 -3464,10 +3573,14 @@@ main(int argc, char **argv
        state = (CState *) pg_malloc(sizeof(CState));
        memset(state, 0, sizeof(CState));
  
-       while ((c = getopt_long(argc, argv, "ih:knvp:dqSNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
 +#ifdef PGXC
-       while ((c = getopt_long(argc, argv, "ih:nvp:dqSNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
++      while ((c = getopt_long(argc, argv, "ih:knvp:dqb:SNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
 +#else
+       while ((c = getopt_long(argc, argv, "ih:nvp:dqb:SNc:j:Crs:t:T:U:lf:D:F:M:P:R:L:", long_options, &optindex)) != -1)
 +#endif
        {
+               char       *script;
                switch (c)
                {
                        case 'i':
Simple merge
Simple merge
Simple merge
index 9b39bf5b633b5b9d1d1a5b82cfd641104ac29a89,8469d9ff03deae4388569a66a128e91f6619a05e..a645a2f22c36816bb66ae7b887c7f86fde448e89
@@@ -791,10 -877,8 +893,11 @@@ typedef struc
  #define THING_NO_SHOW         (THING_NO_CREATE | THING_NO_DROP)
  
  static const pgsql_thing_t words_after_create[] = {
+       {"ACCESS METHOD", NULL, NULL},
        {"AGGREGATE", NULL, &Query_for_list_of_aggregates},
 +#ifdef PGXC
 +      {"BARRIER", NULL, NULL},        /* Barrier name comes next, so skip it */
 +#endif
        {"CAST", NULL, NULL},           /* Casts have complex structures for names, so
                                                                 * skip it */
        {"COLLATION", "SELECT pg_catalog.quote_ident(collname) FROM pg_catalog.pg_collation WHERE collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) AND substring(pg_catalog.quote_ident(collname),1,%d)='%s'"},
        {"GROUP", Query_for_list_of_roles},
        {"LANGUAGE", Query_for_list_of_languages},
        {"INDEX", NULL, &Query_for_list_of_indexes},
-       {"MATERIALIZED VIEW", NULL, NULL},
 +#ifdef PGXC
 +      {"NODE", Query_for_list_of_available_nodenames},
 +      {"NODE GROUP", Query_for_list_of_available_nodegroup_names},
 +#endif
+       {"MATERIALIZED VIEW", NULL, &Query_for_list_of_matviews},
        {"OPERATOR", NULL, NULL},       /* Querying for this is probably not such a
                                                                 * good idea. */
        {"OWNED", NULL, NULL, THING_NO_CREATE},         /* for DROP OWNED BY ... */
@@@ -931,30 -1121,154 +1155,168 @@@ psql_completion(const char *text, int s
  #define prev4_wd  (previous_words[3])
  #define prev5_wd  (previous_words[4])
  #define prev6_wd  (previous_words[5])
+ #define prev7_wd  (previous_words[6])
+ #define prev8_wd  (previous_words[7])
+ #define prev9_wd  (previous_words[8])
+       /* Macros for matching the last N words before point, case-insensitively. */
+ #define TailMatches1(p1) \
+       (previous_words_count >= 1 && \
+        word_matches(p1, prev_wd))
+ #define TailMatches2(p2, p1) \
+       (previous_words_count >= 2 && \
+        word_matches(p1, prev_wd) && \
+        word_matches(p2, prev2_wd))
+ #define TailMatches3(p3, p2, p1) \
+       (previous_words_count >= 3 && \
+        word_matches(p1, prev_wd) && \
+        word_matches(p2, prev2_wd) && \
+        word_matches(p3, prev3_wd))
+ #define TailMatches4(p4, p3, p2, p1) \
+       (previous_words_count >= 4 && \
+        word_matches(p1, prev_wd) && \
+        word_matches(p2, prev2_wd) && \
+        word_matches(p3, prev3_wd) && \
+        word_matches(p4, prev4_wd))
+ #define TailMatches5(p5, p4, p3, p2, p1) \
+       (previous_words_count >= 5 && \
+        word_matches(p1, prev_wd) && \
+        word_matches(p2, prev2_wd) && \
+        word_matches(p3, prev3_wd) && \
+        word_matches(p4, prev4_wd) && \
+        word_matches(p5, prev5_wd))
+ #define TailMatches6(p6, p5, p4, p3, p2, p1) \
+       (previous_words_count >= 6 && \
+        word_matches(p1, prev_wd) && \
+        word_matches(p2, prev2_wd) && \
+        word_matches(p3, prev3_wd) && \
+        word_matches(p4, prev4_wd) && \
+        word_matches(p5, prev5_wd) && \
+        word_matches(p6, prev6_wd))
+ #define TailMatches7(p7, p6, p5, p4, p3, p2, p1) \
+       (previous_words_count >= 7 && \
+        word_matches(p1, prev_wd) && \
+        word_matches(p2, prev2_wd) && \
+        word_matches(p3, prev3_wd) && \
+        word_matches(p4, prev4_wd) && \
+        word_matches(p5, prev5_wd) && \
+        word_matches(p6, prev6_wd) && \
+        word_matches(p7, prev7_wd))
+ #define TailMatches8(p8, p7, p6, p5, p4, p3, p2, p1) \
+       (previous_words_count >= 8 && \
+        word_matches(p1, prev_wd) && \
+        word_matches(p2, prev2_wd) && \
+        word_matches(p3, prev3_wd) && \
+        word_matches(p4, prev4_wd) && \
+        word_matches(p5, prev5_wd) && \
+        word_matches(p6, prev6_wd) && \
+        word_matches(p7, prev7_wd) && \
+        word_matches(p8, prev8_wd))
+ #define TailMatches9(p9, p8, p7, p6, p5, p4, p3, p2, p1) \
+       (previous_words_count >= 9 && \
+        word_matches(p1, prev_wd) && \
+        word_matches(p2, prev2_wd) && \
+        word_matches(p3, prev3_wd) && \
+        word_matches(p4, prev4_wd) && \
+        word_matches(p5, prev5_wd) && \
+        word_matches(p6, prev6_wd) && \
+        word_matches(p7, prev7_wd) && \
+        word_matches(p8, prev8_wd) && \
+        word_matches(p9, prev9_wd))
+       /* Macros for matching the last N words before point, case-sensitively. */
+ #define TailMatchesCS1(p1) \
+       (previous_words_count >= 1 && \
+        word_matches_cs(p1, prev_wd))
+ #define TailMatchesCS2(p2, p1) \
+       (previous_words_count >= 2 && \
+        word_matches_cs(p1, prev_wd) && \
+        word_matches_cs(p2, prev2_wd))
+       /*
+        * Macros for matching N words beginning at the start of the line,
+        * case-insensitively.
+        */
+ #define Matches1(p1) \
+       (previous_words_count == 1 && \
+        TailMatches1(p1))
+ #define Matches2(p1, p2) \
+       (previous_words_count == 2 && \
+        TailMatches2(p1, p2))
+ #define Matches3(p1, p2, p3) \
+       (previous_words_count == 3 && \
+        TailMatches3(p1, p2, p3))
+ #define Matches4(p1, p2, p3, p4) \
+       (previous_words_count == 4 && \
+        TailMatches4(p1, p2, p3, p4))
+ #define Matches5(p1, p2, p3, p4, p5) \
+       (previous_words_count == 5 && \
+        TailMatches5(p1, p2, p3, p4, p5))
+ #define Matches6(p1, p2, p3, p4, p5, p6) \
+       (previous_words_count == 6 && \
+        TailMatches6(p1, p2, p3, p4, p5, p6))
+ #define Matches7(p1, p2, p3, p4, p5, p6, p7) \
+       (previous_words_count == 7 && \
+        TailMatches7(p1, p2, p3, p4, p5, p6, p7))
+ #define Matches8(p1, p2, p3, p4, p5, p6, p7, p8) \
+       (previous_words_count == 8 && \
+        TailMatches8(p1, p2, p3, p4, p5, p6, p7, p8))
+ #define Matches9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \
+       (previous_words_count == 9 && \
+        TailMatches9(p1, p2, p3, p4, p5, p6, p7, p8, p9))
  
+       /*
+        * Macros for matching N words at the start of the line, regardless of
+        * what is after them, case-insensitively.
+        */
+ #define HeadMatches1(p1) \
+       (previous_words_count >= 1 && \
+        word_matches(p1, previous_words[previous_words_count - 1]))
+ #define HeadMatches2(p1, p2) \
+       (previous_words_count >= 2 && \
+        word_matches(p1, previous_words[previous_words_count - 1]) && \
+        word_matches(p2, previous_words[previous_words_count - 2]))
+ #define HeadMatches3(p1, p2, p3) \
+       (previous_words_count >= 3 && \
+        word_matches(p1, previous_words[previous_words_count - 1]) && \
+        word_matches(p2, previous_words[previous_words_count - 2]) && \
+        word_matches(p3, previous_words[previous_words_count - 3]))
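To make the macro semantics concrete: TailMatchesN() inspects only the last N words before the cursor, MatchesN() additionally requires the line to contain exactly N words, HeadMatchesN() anchors at the start of the line, and MatchAny (used throughout the rules below) stands for any single word. A hypothetical buffer shows the difference:

    ALTER TABLE foo ALTER COLUMN
    -- TailMatches2("ALTER", "COLUMN") and HeadMatches2("ALTER", "TABLE") are true here,
    -- but Matches2("ALTER", "COLUMN") is not, because the line holds five words.

    ALTER TABLE
    -- Matches2("ALTER", "TABLE") is true: exactly two words from the start of the line.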
+       /* Known command-starting keywords. */
        static const char *const sql_commands[] = {
 +#ifdef PGXC
 +              /* 
 +               * Added "CLEAN" and "EXECUTE DIRECT"
 +               * Removed LISTEN, NOTIFY, RELEASE, SAVEPOINT and UNLISTEN
 +               */
 +              "ABORT", "ALTER", "ANALYZE", "BEGIN", "CHECKPOINT", "CLEAN CONNECTION", "CLOSE", "CLUSTER",
 +              "COMMENT", "COMMIT", "COPY", "CREATE", "DEALLOCATE", "DECLARE",
 +              "DELETE FROM", "DISCARD", "DO", "DROP", "END", "EXECUTE", "EXECUTE DIRECT", "EXPLAIN", "FETCH",
 +              "GRANT", "INSERT",           "LOAD", "LOCK", "MOVE",           "PREPARE",
 +              "REASSIGN", "REINDEX",            "RESET", "REVOKE", "ROLLBACK",
 +                           "SECURITY LABEL", "SELECT", "SET", "SHOW", "START",
 +              "TABLE", "TRUNCATE",             "UPDATE", "VACUUM", "VALUES", "WITH",
 +#else
                "ABORT", "ALTER", "ANALYZE", "BEGIN", "CHECKPOINT", "CLOSE", "CLUSTER",
                "COMMENT", "COMMIT", "COPY", "CREATE", "DEALLOCATE", "DECLARE",
                "DELETE FROM", "DISCARD", "DO", "DROP", "END", "EXECUTE", "EXPLAIN",
                "FETCH", "GRANT", "IMPORT", "INSERT", "LISTEN", "LOAD", "LOCK",
                "MOVE", "NOTIFY", "PREPARE",
-               "REASSIGN", "REFRESH", "REINDEX", "RELEASE", "RESET", "REVOKE", "ROLLBACK",
+               "REASSIGN", "REFRESH MATERIALIZED VIEW", "REINDEX", "RELEASE",
+               "RESET", "REVOKE", "ROLLBACK",
                "SAVEPOINT", "SECURITY LABEL", "SELECT", "SET", "SHOW", "START",
                "TABLE", "TRUNCATE", "UNLISTEN", "UPDATE", "VACUUM", "VALUES", "WITH",
 +#endif
                NULL
        };
  
  /* ALTER */
  
        /* ALTER TABLE */
-       else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev_wd, "TABLE") == 0)
-       {
+       else if (Matches2("ALTER", "TABLE"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables,
                                                                   "UNION SELECT 'ALL IN TABLESPACE'");
-       }
  
-       /*
-        * complete with what you can alter (TABLE, GROUP, USER, ...) unless we're
-        * in ALTER TABLE sth ALTER
-        */
-       else if (pg_strcasecmp(prev_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "TABLE") != 0)
+       /* ALTER something */
+       else if (Matches1("ALTER"))
        {
                static const char *const list_ALTER[] =
 +#ifdef PGXC
 +              /*
 +               * Added: "NODE" (NODE NAME cannot be altered).
 +               * Removed: "FOREIGN DATA WRAPPER", "FOREIGN TABLE", "LARGE OBJECT",
 +               *          "SERVER", "TRIGGER", "USER MAPPING FOR".
 +               */
 +              {"AGGREGATE", "COLLATION", "CONVERSION", "DATABASE", "DEFAULT PRIVILEGES", "DOMAIN",
 +                      "EXTENSION",                                          "FUNCTION",
 +               "GROUP", "INDEX", "LANGUAGE", "NODE", "NODE GROUP", "OPERATOR",
 +                      "ROLE", "SCHEMA",           "SEQUENCE",  "TABLE",
 +                      "TABLESPACE", "TEXT SEARCH",           "TYPE",
 +              "USER",                     "VIEW", NULL};
 +#else
                {"AGGREGATE", "COLLATION", "CONVERSION", "DATABASE", "DEFAULT PRIVILEGES", "DOMAIN",
                        "EVENT TRIGGER", "EXTENSION", "FOREIGN DATA WRAPPER", "FOREIGN TABLE", "FUNCTION",
                        "GROUP", "INDEX", "LANGUAGE", "LARGE OBJECT", "MATERIALIZED VIEW", "OPERATOR",
                else
                        COMPLETE_WITH_FUNCTION_ARG(prev2_wd);
        }
-       else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev_wd, "NODE") == 0)
-       {
 +#ifdef PGXC
 +      /* ALTER NODE */
-       }
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "NODE") == 0)
-       {
++      else if (Matches2("ALTER", "NODE"))
 +              COMPLETE_WITH_QUERY(Query_for_list_of_available_nodenames);
-       }
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "NODE") == 0 &&
-                        pg_strcasecmp(prev_wd, "WITH") == 0)
-       {
++      else if (Matches3("ALTER", "NODE", MatchAny))
 +              COMPLETE_WITH_CONST("WITH");
-       }
-       else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev4_wd, "NODE") == 0 &&
-                        pg_strcasecmp(prev2_wd, "WITH") == 0)
-       {
-               static const char *const list_NODEOPTIONS[] =
-               {"TYPE", "HOST", "PORT", "PRIMARY", "PREFERRED", NULL};
++      else if (Matches4("ALTER", "NODE", MatchAny, "WITH"))
 +              COMPLETE_WITH_CONST("(");
++      else if (Matches5("ALTER", "NODE", MatchAny, "WITH", "("))
  
-               COMPLETE_WITH_LIST(list_NODEOPTIONS);
-       }
++              COMPLETE_WITH_LIST5("TYPE", "HOST", "PORT", "PRIMARY", "PREFERRED");
 +#endif
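Taken together, the PGXC rules above walk the user toward Postgres-XC's ALTER NODE statement; an illustrative target, with an invented node name and option values and the option keywords taken from the completion list, would be:

    ALTER NODE dn1 WITH (TYPE = 'datanode', HOST = 'node2.internal', PORT = 15432, PREFERRED);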
        /* ALTER SCHEMA <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "SCHEMA") == 0)
-       {
-               static const char *const list_ALTERGEN[] =
-               {"OWNER TO", "RENAME TO", NULL};
-               COMPLETE_WITH_LIST(list_ALTERGEN);
-       }
+       else if (Matches3("ALTER", "SCHEMA", MatchAny))
+               COMPLETE_WITH_LIST2("OWNER TO", "RENAME TO");
  
        /* ALTER COLLATION <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "COLLATION") == 0)
-       {
-               static const char *const list_ALTERGEN[] =
-               {"OWNER TO", "RENAME TO", "SET SCHEMA", NULL};
-               COMPLETE_WITH_LIST(list_ALTERGEN);
-       }
+       else if (Matches3("ALTER", "COLLATION", MatchAny))
+               COMPLETE_WITH_LIST3("OWNER TO", "RENAME TO", "SET SCHEMA");
  
        /* ALTER CONVERSION <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "CONVERSION") == 0)
-       {
-               static const char *const list_ALTERGEN[] =
-               {"OWNER TO", "RENAME TO", "SET SCHEMA", NULL};
-               COMPLETE_WITH_LIST(list_ALTERGEN);
-       }
+       else if (Matches3("ALTER", "CONVERSION", MatchAny))
+               COMPLETE_WITH_LIST3("OWNER TO", "RENAME TO", "SET SCHEMA");
  
        /* ALTER DATABASE <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "DATABASE") == 0)
-       {
-               static const char *const list_ALTERDATABASE[] =
-               {"RESET", "SET", "OWNER TO", "RENAME TO", "IS_TEMPLATE",
-               "ALLOW_CONNECTIONS", "CONNECTION LIMIT", NULL};
-               COMPLETE_WITH_LIST(list_ALTERDATABASE);
-       }
+       else if (Matches3("ALTER", "DATABASE", MatchAny))
+               COMPLETE_WITH_LIST7("RESET", "SET", "OWNER TO", "RENAME TO",
+                                                       "IS_TEMPLATE", "ALLOW_CONNECTIONS",
+                                                       "CONNECTION LIMIT");
  
        /* ALTER EVENT TRIGGER */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "EVENT") == 0 &&
-                        pg_strcasecmp(prev_wd, "TRIGGER") == 0)
-       {
+       else if (Matches3("ALTER", "EVENT", "TRIGGER"))
                COMPLETE_WITH_QUERY(Query_for_list_of_event_triggers);
-       }
  
        /* ALTER EVENT TRIGGER <name> */
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "EVENT") == 0 &&
-                        pg_strcasecmp(prev2_wd, "TRIGGER") == 0)
-       {
-               static const char *const list_ALTER_EVENT_TRIGGER[] =
-               {"DISABLE", "ENABLE", "OWNER TO", "RENAME TO", NULL};
-               COMPLETE_WITH_LIST(list_ALTER_EVENT_TRIGGER);
-       }
+       else if (Matches4("ALTER", "EVENT", "TRIGGER", MatchAny))
+               COMPLETE_WITH_LIST4("DISABLE", "ENABLE", "OWNER TO", "RENAME TO");
  
        /* ALTER EVENT TRIGGER <name> ENABLE */
-       else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev4_wd, "EVENT") == 0 &&
-                        pg_strcasecmp(prev3_wd, "TRIGGER") == 0 &&
-                        pg_strcasecmp(prev_wd, "ENABLE") == 0)
-       {
-               static const char *const list_ALTER_EVENT_TRIGGER_ENABLE[] =
-               {"REPLICA", "ALWAYS", NULL};
-               COMPLETE_WITH_LIST(list_ALTER_EVENT_TRIGGER_ENABLE);
-       }
+       else if (Matches5("ALTER", "EVENT", "TRIGGER", MatchAny, "ENABLE"))
+               COMPLETE_WITH_LIST2("REPLICA", "ALWAYS");
  
        /* ALTER EXTENSION <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "EXTENSION") == 0)
-       {
-               static const char *const list_ALTEREXTENSION[] =
-               {"ADD", "DROP", "UPDATE", "SET SCHEMA", NULL};
-               COMPLETE_WITH_LIST(list_ALTEREXTENSION);
-       }
+       else if (Matches3("ALTER", "EXTENSION", MatchAny))
+               COMPLETE_WITH_LIST4("ADD", "DROP", "UPDATE", "SET SCHEMA");
  
 +#ifndef PGXC
 +      /* PGXCTODO: This should be re-enabled once FOREIGN DATA WRAPPER is supported */
        /* ALTER FOREIGN */
-       else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev_wd, "FOREIGN") == 0)
-       {
-               static const char *const list_ALTER_FOREIGN[] =
-               {"DATA WRAPPER", "TABLE", NULL};
-               COMPLETE_WITH_LIST(list_ALTER_FOREIGN);
-       }
+       else if (Matches2("ALTER", "FOREIGN"))
+               COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE");
  
        /* ALTER FOREIGN DATA WRAPPER <name> */
-       else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev4_wd, "FOREIGN") == 0 &&
-                        pg_strcasecmp(prev3_wd, "DATA") == 0 &&
-                        pg_strcasecmp(prev2_wd, "WRAPPER") == 0)
-       {
-               static const char *const list_ALTER_FDW[] =
-               {"HANDLER", "VALIDATOR", "OPTIONS", "OWNER TO", NULL};
-               COMPLETE_WITH_LIST(list_ALTER_FDW);
-       }
+       else if (Matches5("ALTER", "FOREIGN", "DATA", "WRAPPER", MatchAny))
+               COMPLETE_WITH_LIST5("HANDLER", "VALIDATOR", "OPTIONS", "OWNER TO", "RENAME TO");
  
        /* ALTER FOREIGN TABLE <name> */
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "FOREIGN") == 0 &&
-                        pg_strcasecmp(prev2_wd, "TABLE") == 0)
+       else if (Matches4("ALTER", "FOREIGN", "TABLE", MatchAny))
        {
                static const char *const list_ALTER_FOREIGN_TABLE[] =
                {"ADD", "ALTER", "DISABLE TRIGGER", "DROP", "ENABLE", "INHERIT",
  
                COMPLETE_WITH_LIST(list_ALTER_FOREIGN_TABLE);
        }
 +#endif
  
        /* ALTER INDEX */
-       else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev_wd, "INDEX") == 0)
-       {
+       else if (Matches2("ALTER", "INDEX"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes,
                                                                   "UNION SELECT 'ALL IN TABLESPACE'");
-       }
        /* ALTER INDEX <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "INDEX") == 0)
-       {
-               static const char *const list_ALTERINDEX[] =
-               {"OWNER TO", "RENAME TO", "SET", "RESET", NULL};
-               COMPLETE_WITH_LIST(list_ALTERINDEX);
-       }
+       else if (Matches3("ALTER", "INDEX", MatchAny))
+               COMPLETE_WITH_LIST4("OWNER TO", "RENAME TO", "SET", "RESET");
        /* ALTER INDEX <name> SET */
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "INDEX") == 0 &&
-                        pg_strcasecmp(prev_wd, "SET") == 0)
-       {
-               static const char *const list_ALTERINDEXSET[] =
-               {"(", "TABLESPACE", NULL};
-               COMPLETE_WITH_LIST(list_ALTERINDEXSET);
-       }
+       else if (Matches4("ALTER", "INDEX", MatchAny, "SET"))
+               COMPLETE_WITH_LIST2("(", "TABLESPACE");
        /* ALTER INDEX <name> RESET */
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "INDEX") == 0 &&
-                        pg_strcasecmp(prev_wd, "RESET") == 0)
+       else if (Matches4("ALTER", "INDEX", MatchAny, "RESET"))
                COMPLETE_WITH_CONST("(");
        /* ALTER INDEX <foo> SET|RESET ( */
-       else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev4_wd, "INDEX") == 0 &&
-                        (pg_strcasecmp(prev2_wd, "SET") == 0 ||
-                         pg_strcasecmp(prev2_wd, "RESET") == 0) &&
-                        pg_strcasecmp(prev_wd, "(") == 0)
-       {
-               static const char *const list_INDEXOPTIONS[] =
-               {"fillfactor", "fastupdate", "gin_pending_list_limit", NULL};
-               COMPLETE_WITH_LIST(list_INDEXOPTIONS);
-       }
+       else if (Matches5("ALTER", "INDEX", MatchAny, "RESET", "("))
+               COMPLETE_WITH_LIST3("fillfactor", "fastupdate",
+                                                       "gin_pending_list_limit");
+       else if (Matches5("ALTER", "INDEX", MatchAny, "SET", "("))
+               COMPLETE_WITH_LIST3("fillfactor =", "fastupdate =",
+                                                       "gin_pending_list_limit =");
  
        /* ALTER LANGUAGE <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "LANGUAGE") == 0)
-       {
-               static const char *const list_ALTERLANGUAGE[] =
-               {"OWNER TO", "RENAME TO", NULL};
-               COMPLETE_WITH_LIST(list_ALTERLANGUAGE);
-       }
+       else if (Matches3("ALTER", "LANGUAGE", MatchAny))
+               COMPLETE_WITH_LIST2("OWNER TO", "RENAME TO");
  
        /* ALTER LARGE OBJECT <oid> */
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "LARGE") == 0 &&
-                        pg_strcasecmp(prev2_wd, "OBJECT") == 0)
-       {
-               static const char *const list_ALTERLARGEOBJECT[] =
-               {"OWNER TO", NULL};
-               COMPLETE_WITH_LIST(list_ALTERLARGEOBJECT);
-       }
+       else if (Matches4("ALTER", "LARGE", "OBJECT", MatchAny))
+               COMPLETE_WITH_CONST("OWNER TO");
  
        /* ALTER MATERIALIZED VIEW */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "MATERIALIZED") == 0 &&
-                        pg_strcasecmp(prev_wd, "VIEW") == 0)
-       {
+       else if (Matches3("ALTER", "MATERIALIZED", "VIEW"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews,
                                                                   "UNION SELECT 'ALL IN TABLESPACE'");
-       }
  
        /* ALTER USER,ROLE <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        !(pg_strcasecmp(prev2_wd, "USER") == 0 && pg_strcasecmp(prev_wd, "MAPPING") == 0) &&
-                        (pg_strcasecmp(prev2_wd, "USER") == 0 ||
-                         pg_strcasecmp(prev2_wd, "ROLE") == 0))
+       else if (Matches3("ALTER", "USER|ROLE", MatchAny) &&
+                        !TailMatches2("USER", "MAPPING"))
        {
                static const char *const list_ALTERUSER[] =
                {"BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE",
  
                COMPLETE_WITH_LIST(list_ALTERSEQUENCE);
        }
--      /* ALTER SEQUENCE <name> NO */
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "SEQUENCE") == 0 &&
-                        pg_strcasecmp(prev_wd, "NO") == 0)
-       {
-               static const char *const list_ALTERSEQUENCE2[] =
-               {"MINVALUE", "MAXVALUE", "CYCLE", NULL};
-               COMPLETE_WITH_LIST(list_ALTERSEQUENCE2);
-       }
+       else if (Matches4("ALTER", "SEQUENCE", MatchAny, "NO"))
+               COMPLETE_WITH_LIST3("MINVALUE", "MAXVALUE", "CYCLE");
 +#ifndef PGXC
 +      /* PGXCTODO: This should be re-enabled once SERVER is supported */
        /* ALTER SERVER <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "SERVER") == 0)
-       {
-               static const char *const list_ALTER_SERVER[] =
-               {"VERSION", "OPTIONS", "OWNER TO", NULL};
-               COMPLETE_WITH_LIST(list_ALTER_SERVER);
-       }
+       else if (Matches3("ALTER", "SERVER", MatchAny))
+               COMPLETE_WITH_LIST4("VERSION", "OPTIONS", "OWNER TO", "RENAME TO");
+       /* ALTER SERVER <name> VERSION <version> */
+       else if (Matches5("ALTER", "SERVER", MatchAny, "VERSION", MatchAny))
+               COMPLETE_WITH_CONST("OPTIONS");
 +#endif
        /* ALTER SYSTEM SET, RESET, RESET ALL */
-       else if (pg_strcasecmp(prev2_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev_wd, "SYSTEM") == 0)
-       {
-               static const char *const list_ALTERSYSTEM[] =
-               {"SET", "RESET", NULL};
-               COMPLETE_WITH_LIST(list_ALTERSYSTEM);
-       }
+       else if (Matches2("ALTER", "SYSTEM"))
+               COMPLETE_WITH_LIST2("SET", "RESET");
        /* ALTER SYSTEM SET|RESET <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "SYSTEM") == 0 &&
-                        (pg_strcasecmp(prev_wd, "SET") == 0 ||
-                         pg_strcasecmp(prev_wd, "RESET") == 0))
+       else if (Matches3("ALTER", "SYSTEM", "SET|RESET"))
                COMPLETE_WITH_QUERY(Query_for_list_of_alter_system_set_vars);
        /* ALTER VIEW <name> */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "VIEW") == 0)
-       {
-               static const char *const list_ALTERVIEW[] =
-               {"ALTER COLUMN", "OWNER TO", "RENAME TO", "SET SCHEMA", NULL};
-               COMPLETE_WITH_LIST(list_ALTERVIEW);
-       }
+       else if (Matches3("ALTER", "VIEW", MatchAny))
+               COMPLETE_WITH_LIST4("ALTER COLUMN", "OWNER TO", "RENAME TO",
+                                                       "SET SCHEMA");
        /* ALTER MATERIALIZED VIEW <name> */
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "MATERIALIZED") == 0 &&
-                        pg_strcasecmp(prev2_wd, "VIEW") == 0)
-       {
-               static const char *const list_ALTERMATVIEW[] =
-               {"ALTER COLUMN", "OWNER TO", "RENAME TO", "SET SCHEMA", NULL};
-               COMPLETE_WITH_LIST(list_ALTERMATVIEW);
-       }
+       else if (Matches4("ALTER", "MATERIALIZED", "VIEW", MatchAny))
+               COMPLETE_WITH_LIST4("ALTER COLUMN", "OWNER TO", "RENAME TO",
+                                                       "SET SCHEMA");
  
+       /* ALTER POLICY <name> */
+       else if (Matches2("ALTER", "POLICY"))
+               COMPLETE_WITH_QUERY(Query_for_list_of_policies);
        /* ALTER POLICY <name> ON */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "POLICY") == 0)
+       else if (Matches3("ALTER", "POLICY", MatchAny))
                COMPLETE_WITH_CONST("ON");
        /* ALTER POLICY <name> ON <table> */
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev_wd, "ON") == 0)
-               COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
-       /* ALTER POLICY <name> ON <table> - show options */
-       else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev4_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev2_wd, "ON") == 0)
+       else if (Matches4("ALTER", "POLICY", MatchAny, "ON"))
        {
-               static const char *const list_ALTERPOLICY[] =
-               {"RENAME TO", "TO", "USING", "WITH CHECK", NULL};
-               COMPLETE_WITH_LIST(list_ALTERPOLICY);
+               completion_info_charp = prev2_wd;
+               COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_policy);
        }
+       /* ALTER POLICY <name> ON <table> - show options */
+       else if (Matches5("ALTER", "POLICY", MatchAny, "ON", MatchAny))
+               COMPLETE_WITH_LIST4("RENAME TO", "TO", "USING (", "WITH CHECK (");
        /* ALTER POLICY <name> ON <table> TO <role> */
-       else if (pg_strcasecmp(prev6_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev3_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev_wd, "TO") == 0)
+       else if (Matches6("ALTER", "POLICY", MatchAny, "ON", MatchAny, "TO"))
                COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles);
        /* ALTER POLICY <name> ON <table> USING ( */
-       else if (pg_strcasecmp(prev6_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev3_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev_wd, "USING") == 0)
+       else if (Matches6("ALTER", "POLICY", MatchAny, "ON", MatchAny, "USING"))
                COMPLETE_WITH_CONST("(");
        /* ALTER POLICY <name> ON <table> WITH CHECK ( */
-       else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev4_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev2_wd, "WITH") == 0 &&
-                        pg_strcasecmp(prev_wd, "CHECK") == 0)
+       else if (Matches7("ALTER", "POLICY", MatchAny, "ON", MatchAny, "WITH", "CHECK"))
                COMPLETE_WITH_CONST("(");
  
        /* ALTER RULE <name>, add ON */
        }
  
        /* ALTER RULE <name> ON <name> */
-       else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev4_wd, "RULE") == 0)
+       else if (Matches5("ALTER", "RULE", MatchAny, "ON", MatchAny))
                COMPLETE_WITH_CONST("RENAME TO");
  
 +#ifndef PGXC
 +      /* PGXCTODO: This should be re-enabled once TRIGGER is supported */
        /* ALTER TRIGGER <name>, add ON */
-       else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "TRIGGER") == 0)
+       else if (Matches3("ALTER", "TRIGGER", MatchAny))
                COMPLETE_WITH_CONST("ON");
  
-       else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "TRIGGER") == 0)
+       else if (Matches4("ALTER", "TRIGGER", MatchAny, MatchAny))
        {
                completion_info_charp = prev2_wd;
                COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_trigger);
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
  
        /* ALTER TRIGGER <name> ON <name> */
-       else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
-                        pg_strcasecmp(prev4_wd, "TRIGGER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "ON") == 0)
+       else if (Matches5("ALTER", "TRIGGER", MatchAny, "ON", MatchAny))
                COMPLETE_WITH_CONST("RENAME TO");
 +#endif
  
        /*
         * If we detect ALTER TABLE <name>, suggest sub commands
                COMPLETE_WITH_QUERY(Query_for_list_of_roles);
  
  /* BEGIN, END, ABORT */
-       else if (pg_strcasecmp(prev_wd, "BEGIN") == 0 ||
-                        pg_strcasecmp(prev_wd, "END") == 0 ||
-                        pg_strcasecmp(prev_wd, "ABORT") == 0)
-       {
-               static const char *const list_TRANS[] =
-               {"WORK", "TRANSACTION", NULL};
-               COMPLETE_WITH_LIST(list_TRANS);
-       }
+       else if (Matches1("BEGIN|END|ABORT"))
+               COMPLETE_WITH_LIST2("WORK", "TRANSACTION");
  /* COMMIT */
-       else if (pg_strcasecmp(prev_wd, "COMMIT") == 0)
-       {
-               static const char *const list_COMMIT[] =
-               {"WORK", "TRANSACTION", "PREPARED", NULL};
-               COMPLETE_WITH_LIST(list_COMMIT);
-       }
+       else if (Matches1("COMMIT"))
+               COMPLETE_WITH_LIST3("WORK", "TRANSACTION", "PREPARED");
  /* RELEASE SAVEPOINT */
-       else if (pg_strcasecmp(prev_wd, "RELEASE") == 0)
+       else if (Matches1("RELEASE"))
                COMPLETE_WITH_CONST("SAVEPOINT");
- /* ROLLBACK*/
-       else if (pg_strcasecmp(prev_wd, "ROLLBACK") == 0)
-       {
-               static const char *const list_TRANS[] =
-               {"WORK", "TRANSACTION", "TO SAVEPOINT", "PREPARED", NULL};
-               COMPLETE_WITH_LIST(list_TRANS);
-       }
 +#ifdef PGXC
 +/* CLEAN CONNECTION */
-       else if (pg_strcasecmp(prev2_wd, "CLEAN") == 0 &&
-                        pg_strcasecmp(prev_wd, "CONNECTION") == 0)
++      else if (Matches2("CLEAN", "CONNECTION"))
 +              COMPLETE_WITH_CONST("TO");
-       else if (pg_strcasecmp(prev3_wd, "CLEAN") == 0 &&
-                        pg_strcasecmp(prev2_wd, "CONNECTION") == 0 &&
-                        pg_strcasecmp(prev_wd, "TO") == 0)
 +      /* CLEAN CONNECTION TO */
++      else if (Matches3("CLEAN", "CONNECTION", "TO"))
-       {
-               static const char *const list_CLEANCONNECTIONOPT[] =
-                       {"ALL", "COORDINATOR", "NODE", NULL};
-               COMPLETE_WITH_LIST(list_CLEANCONNECTIONOPT);
-       }
-       else if (pg_strcasecmp(prev4_wd, "CLEAN") == 0 &&
-                        pg_strcasecmp(prev3_wd, "CONNECTION") == 0 &&
-                        pg_strcasecmp(prev2_wd, "TO") == 0 &&
-                        pg_strcasecmp(prev_wd, "ALL") == 0)
++              COMPLETE_WITH_LIST3("ALL", "COORDINATOR", "NODE");
++      else if (Matches4("CLEAN", "CONNECTION", "TO", "ALL"))
 +              COMPLETE_WITH_CONST("FORCE");
-       else if (pg_strcasecmp(prev4_wd, "CLEAN") == 0 &&
-                        pg_strcasecmp(prev3_wd, "CONNECTION") == 0 &&
-                        pg_strcasecmp(prev2_wd, "TO") == 0 &&
-                        pg_strcasecmp(prev_wd, "COORDINATOR") == 0)
++      else if (Matches4("CLEAN", "CONNECTION", "TO", "COORDINATOR"))
 +              COMPLETE_WITH_QUERY(Query_for_list_of_available_coordinators);
-       else if (pg_strcasecmp(prev4_wd, "CLEAN") == 0 &&
-                        pg_strcasecmp(prev3_wd, "CONNECTION") == 0 &&
-                        pg_strcasecmp(prev2_wd, "TO") == 0 &&
-                        pg_strcasecmp(prev_wd, "NODE") == 0)
++      else if (Matches4("CLEAN", "CONNECTION", "TO", "NODE"))
 +              COMPLETE_WITH_QUERY(Query_for_list_of_available_datanodes);
-       else if (pg_strcasecmp(prev2_wd, "TO") == 0 &&
-                        pg_strcasecmp(prev_wd, "USER") == 0)
++      else if (TailMatches2("TO", "USER"))
 +              COMPLETE_WITH_QUERY(Query_for_list_of_roles);
-       else if (pg_strcasecmp(prev2_wd, "FOR") == 0 &&
-                        pg_strcasecmp(prev_wd, "DATABASE") == 0)
++      else if (TailMatches2("FOR", "DATABASE"))
 +              COMPLETE_WITH_QUERY(Query_for_list_of_databases);
 +#endif
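These rules cover Postgres-XC's CLEAN CONNECTION command. A sketch of the kind of statement being completed, with invented database and role names and keywords drawn from the lists above (the TO COORDINATOR and TO NODE forms instead take node names from the corresponding queries):

    CLEAN CONNECTION TO ALL FORCE FOR DATABASE testdb TO USER appuser;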
+ /* ROLLBACK */
+       else if (Matches1("ROLLBACK"))
+               COMPLETE_WITH_LIST4("WORK", "TRANSACTION", "TO SAVEPOINT", "PREPARED");
  /* CLUSTER */
-       /*
-        * If the previous word is CLUSTER and not WITHOUT produce list of tables
-        */
-       else if (pg_strcasecmp(prev_wd, "CLUSTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "WITHOUT") != 0)
+       else if (Matches1("CLUSTER"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, "UNION SELECT 'VERBOSE'");
-       /*
-        * If the previous words are CLUSTER VERBOSE produce list of tables
-        */
-       else if (pg_strcasecmp(prev_wd, "VERBOSE") == 0 &&
-                        pg_strcasecmp(prev2_wd, "CLUSTER") == 0)
+       else if (Matches2("CLUSTER", "VERBOSE"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL);
        /* If we have CLUSTER <sth>, then add "USING" */
-       else if (pg_strcasecmp(prev2_wd, "CLUSTER") == 0 &&
-                        pg_strcasecmp(prev_wd, "ON") != 0 &&
-                        pg_strcasecmp(prev_wd, "VERBOSE") != 0)
-       {
+       else if (Matches2("CLUSTER", MatchAnyExcept("VERBOSE|ON")))
                COMPLETE_WITH_CONST("USING");
-       }
        /* If we have CLUSTER VERBOSE <sth>, then add "USING" */
-       else if (pg_strcasecmp(prev3_wd, "CLUSTER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "VERBOSE") == 0)
-       {
+       else if (Matches3("CLUSTER", "VERBOSE", MatchAny))
                COMPLETE_WITH_CONST("USING");
-       }
-       /*
-        * If we have CLUSTER <sth> USING, then add the index as well.
-        */
-       else if (pg_strcasecmp(prev3_wd, "CLUSTER") == 0 &&
-                        pg_strcasecmp(prev_wd, "USING") == 0)
-       {
-               completion_info_charp = prev2_wd;
-               COMPLETE_WITH_QUERY(Query_for_index_of_table);
-       }
-       /*
-        * If we have CLUSTER VERBOSE <sth> USING, then add the index as well.
-        */
-       else if (pg_strcasecmp(prev4_wd, "CLUSTER") == 0 &&
-                        pg_strcasecmp(prev3_wd, "VERBOSE") == 0 &&
-                        pg_strcasecmp(prev_wd, "USING") == 0)
+       /* If we have CLUSTER <sth> USING, then add the index as well */
+       else if (Matches3("CLUSTER", MatchAny, "USING") ||
+                        Matches4("CLUSTER", "VERBOSE", MatchAny, "USING"))
        {
                completion_info_charp = prev2_wd;
                COMPLETE_WITH_QUERY(Query_for_index_of_table);
         * Complete INDEX <name> ON <table> with a list of table columns (which
         * should really be in parens)
         */
-       else if ((pg_strcasecmp(prev4_wd, "INDEX") == 0 ||
-                         pg_strcasecmp(prev3_wd, "INDEX") == 0 ||
-                         pg_strcasecmp(prev3_wd, "CONCURRENTLY") == 0) &&
-                        pg_strcasecmp(prev2_wd, "ON") == 0)
-       {
-               static const char *const list_CREATE_INDEX2[] =
-               {"(", "USING", NULL};
-               COMPLETE_WITH_LIST(list_CREATE_INDEX2);
-       }
-       else if ((pg_strcasecmp(prev5_wd, "INDEX") == 0 ||
-                         pg_strcasecmp(prev4_wd, "INDEX") == 0 ||
-                         pg_strcasecmp(prev4_wd, "CONCURRENTLY") == 0) &&
-                        pg_strcasecmp(prev3_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev_wd, "(") == 0)
+       else if (TailMatches4("INDEX", MatchAny, "ON", MatchAny) ||
+                        TailMatches3("INDEX|CONCURRENTLY", "ON", MatchAny))
+               COMPLETE_WITH_LIST2("(", "USING");
+       else if (TailMatches5("INDEX", MatchAny, "ON", MatchAny, "(") ||
+                        TailMatches4("INDEX|CONCURRENTLY", "ON", MatchAny, "("))
                COMPLETE_WITH_ATTR(prev2_wd, "");
        /* same if you put in USING */
-       else if (pg_strcasecmp(prev5_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev3_wd, "USING") == 0 &&
-                        pg_strcasecmp(prev_wd, "(") == 0)
+       else if (TailMatches5("ON", MatchAny, "USING", MatchAny, "("))
                COMPLETE_WITH_ATTR(prev4_wd, "");
        /* Complete USING with an index method */
-       else if ((pg_strcasecmp(prev6_wd, "INDEX") == 0 ||
-                         pg_strcasecmp(prev5_wd, "INDEX") == 0 ||
-                         pg_strcasecmp(prev4_wd, "INDEX") == 0) &&
-                        pg_strcasecmp(prev3_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev_wd, "USING") == 0)
+       else if (TailMatches6("INDEX", MatchAny, MatchAny, "ON", MatchAny, "USING") ||
+                        TailMatches5("INDEX", MatchAny, "ON", MatchAny, "USING") ||
+                        TailMatches4("INDEX", "ON", MatchAny, "USING"))
                COMPLETE_WITH_QUERY(Query_for_list_of_access_methods);
-       else if (pg_strcasecmp(prev4_wd, "ON") == 0 &&
-                        (!(pg_strcasecmp(prev6_wd, "POLICY") == 0) &&
-                         !(pg_strcasecmp(prev4_wd, "FOR") == 0)) &&
-                        pg_strcasecmp(prev2_wd, "USING") == 0)
+       else if (TailMatches4("ON", MatchAny, "USING", MatchAny) &&
+                        !TailMatches6("POLICY", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny) &&
+                        !TailMatches4("FOR", MatchAny, MatchAny, MatchAny))
                COMPLETE_WITH_CONST("(");
 -
 +#ifdef PGXC
 +/* CREATE NODE */
-       else if (pg_strcasecmp(prev3_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev2_wd, "NODE") == 0)
++      else if (Matches3("CREATE", "NODE", MatchAny))
 +              COMPLETE_WITH_CONST("WITH");
-       else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev3_wd, "NODE") == 0 &&
-                        pg_strcasecmp(prev_wd, "WITH") == 0)
++      else if (Matches4("CREATE", "NODE", MatchAny, "WITH"))
 +              COMPLETE_WITH_CONST("(");
-       else if (pg_strcasecmp(prev5_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev4_wd, "NODE") == 0 &&
-                        pg_strcasecmp(prev2_wd, "WITH") == 0)
-       {
-               static const char *const list_NODEOPT[] =
-               {"TYPE", "HOST", "PORT", "PRIMARY", "PREFERRED", NULL};
-               COMPLETE_WITH_LIST(list_NODEOPT);
-       }
++      else if (Matches5("CREATE", "NODE", MatchAny, "WITH", "("))
++              COMPLETE_WITH_LIST5("TYPE", "HOST", "PORT", "PRIMARY", "PREFERRED");
 +/* CREATE NODE GROUP */
-       else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev3_wd, "NODE") == 0 &&
-                        pg_strcasecmp(prev2_wd, "GROUP") == 0)
++      else if (Matches4("CREATE", "NODE", "GROUP", MatchAny))
 +              COMPLETE_WITH_CONST("WITH");
 +#endif
        /* CREATE POLICY */
        /* Complete "CREATE POLICY <name> ON" */
-       else if (pg_strcasecmp(prev3_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev2_wd, "POLICY") == 0)
+       else if (Matches3("CREATE", "POLICY", MatchAny))
                COMPLETE_WITH_CONST("ON");
        /* Complete "CREATE POLICY <name> ON <table>" */
-       else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev3_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev_wd, "ON") == 0)
+       else if (Matches4("CREATE", "POLICY", MatchAny, "ON"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
        /* Complete "CREATE POLICY <name> ON <table> FOR|TO|USING|WITH CHECK" */
-       else if (pg_strcasecmp(prev5_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev4_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev2_wd, "ON") == 0)
-       {
-               static const char *const list_POLICYOPTIONS[] =
-               {"FOR", "TO", "USING", "WITH CHECK", NULL};
-               COMPLETE_WITH_LIST(list_POLICYOPTIONS);
-       }
-       /*
-        * Complete "CREATE POLICY <name> ON <table> FOR
-        * ALL|SELECT|INSERT|UPDATE|DELETE"
-        */
-       else if (pg_strcasecmp(prev6_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev3_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev_wd, "FOR") == 0)
-       {
-               static const char *const list_POLICYCMDS[] =
-               {"ALL", "SELECT", "INSERT", "UPDATE", "DELETE", NULL};
-               COMPLETE_WITH_LIST(list_POLICYCMDS);
-       }
+       else if (Matches5("CREATE", "POLICY", MatchAny, "ON", MatchAny))
+               COMPLETE_WITH_LIST4("FOR", "TO", "USING (", "WITH CHECK (");
+       /* CREATE POLICY <name> ON <table> FOR ALL|SELECT|INSERT|UPDATE|DELETE */
+       else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR"))
+               COMPLETE_WITH_LIST5("ALL", "SELECT", "INSERT", "UPDATE", "DELETE");
        /* Complete "CREATE POLICY <name> ON <table> FOR INSERT TO|WITH CHECK" */
-       else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev4_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev2_wd, "FOR") == 0 &&
-                        pg_strcasecmp(prev_wd, "INSERT") == 0)
-       {
-               static const char *const list_POLICYOPTIONS[] =
-               {"TO", "WITH CHECK", NULL};
-               COMPLETE_WITH_LIST(list_POLICYOPTIONS);
-       }
-       /*
-        * Complete "CREATE POLICY <name> ON <table> FOR SELECT TO|USING" Complete
-        * "CREATE POLICY <name> ON <table> FOR DELETE TO|USING"
-        */
-       else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev4_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev2_wd, "FOR") == 0 &&
-                        (pg_strcasecmp(prev_wd, "SELECT") == 0 ||
-                         pg_strcasecmp(prev_wd, "DELETE") == 0))
-       {
-               static const char *const list_POLICYOPTIONS[] =
-               {"TO", "USING", NULL};
-               COMPLETE_WITH_LIST(list_POLICYOPTIONS);
-       }
-       /*
-        * Complete "CREATE POLICY <name> ON <table> FOR ALL TO|USING|WITH CHECK"
-        * Complete "CREATE POLICY <name> ON <table> FOR UPDATE TO|USING|WITH
-        * CHECK"
-        */
-       else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev4_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev2_wd, "FOR") == 0 &&
-                        (pg_strcasecmp(prev_wd, "ALL") == 0 ||
-                         pg_strcasecmp(prev_wd, "UPDATE") == 0))
-       {
-               static const char *const list_POLICYOPTIONS[] =
-               {"TO", "USING", "WITH CHECK", NULL};
-               COMPLETE_WITH_LIST(list_POLICYOPTIONS);
-       }
+       else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "INSERT"))
+               COMPLETE_WITH_LIST2("TO", "WITH CHECK (");
+       /* Complete "CREATE POLICY <name> ON <table> FOR SELECT|DELETE TO|USING" */
+       else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "SELECT|DELETE"))
+               COMPLETE_WITH_LIST2("TO", "USING (");
+       /* CREATE POLICY <name> ON <table> FOR ALL|UPDATE TO|USING|WITH CHECK */
+       else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "ALL|UPDATE"))
+               COMPLETE_WITH_LIST3("TO", "USING (", "WITH CHECK (");
        /* Complete "CREATE POLICY <name> ON <table> TO <role>" */
-       else if (pg_strcasecmp(prev6_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev3_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev_wd, "TO") == 0)
+       else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "TO"))
                COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles);
        /* Complete "CREATE POLICY <name> ON <table> USING (" */
-       else if (pg_strcasecmp(prev6_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
-                        pg_strcasecmp(prev3_wd, "ON") == 0 &&
-                        pg_strcasecmp(prev_wd, "USING") == 0)
+       else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "USING"))
                COMPLETE_WITH_CONST("(");
  
  /* CREATE RULE */
         * complete CREATE TRIGGER <name> BEFORE,AFTER event ON with a list of
         * tables
         */
-       else if (pg_strcasecmp(prev5_wd, "TRIGGER") == 0 &&
-                        (pg_strcasecmp(prev3_wd, "BEFORE") == 0 ||
-                         pg_strcasecmp(prev3_wd, "AFTER") == 0) &&
-                        pg_strcasecmp(prev_wd, "ON") == 0)
+       else if (TailMatches6("CREATE", "TRIGGER", MatchAny, "BEFORE|AFTER", MatchAny, "ON"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
        /* complete CREATE TRIGGER ... INSTEAD OF event ON with a list of views */
-       else if (pg_strcasecmp(prev4_wd, "INSTEAD") == 0 &&
-                        pg_strcasecmp(prev3_wd, "OF") == 0 &&
-                        pg_strcasecmp(prev_wd, "ON") == 0)
+       else if (TailMatches7("CREATE", "TRIGGER", MatchAny, "INSTEAD", "OF", MatchAny, "ON"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_views, NULL);
        /* complete CREATE TRIGGER ... EXECUTE with PROCEDURE */
-       else if (pg_strcasecmp(prev_wd, "EXECUTE") == 0 &&
-                        prev2_wd[0] != '\0')
+       else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches1("EXECUTE"))
                COMPLETE_WITH_CONST("PROCEDURE");
  
 +#endif
  /* CREATE ROLE,USER,GROUP <name> */
-       else if (pg_strcasecmp(prev3_wd, "CREATE") == 0 &&
-                        !(pg_strcasecmp(prev2_wd, "USER") == 0 && pg_strcasecmp(prev_wd, "MAPPING") == 0) &&
-                        (pg_strcasecmp(prev2_wd, "ROLE") == 0 ||
-                         pg_strcasecmp(prev2_wd, "GROUP") == 0 || pg_strcasecmp(prev2_wd, "USER") == 0))
+       else if (Matches3("CREATE", "ROLE|GROUP|USER", MatchAny) &&
+                        !TailMatches2("USER", "MAPPING"))
        {
                static const char *const list_CREATEROLE[] =
                {"ADMIN", "BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE",
        /* XXX: implement tab completion for DELETE ... USING */
  
  /* DISCARD */
-       else if (pg_strcasecmp(prev_wd, "DISCARD") == 0)
-       {
-               static const char *const list_DISCARD[] =
-               {"ALL", "PLANS", "SEQUENCES", "TEMP", NULL};
-               COMPLETE_WITH_LIST(list_DISCARD);
-       }
+       else if (Matches1("DISCARD"))
+               COMPLETE_WITH_LIST4("ALL", "PLANS", "SEQUENCES", "TEMP");
  
  /* DO */
-       /*
-        * Complete DO with LANGUAGE.
-        */
-       else if (pg_strcasecmp(prev_wd, "DO") == 0)
-       {
-               static const char *const list_DO[] =
-               {"LANGUAGE", NULL};
-               COMPLETE_WITH_LIST(list_DO);
-       }
- /* DROP (when not the previous word) */
-       /* DROP AGGREGATE */
-       else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev2_wd, "AGGREGATE") == 0)
+       else if (Matches1("DO"))
+               COMPLETE_WITH_CONST("LANGUAGE");
+ /* DROP */
+       /* Complete DROP object with CASCADE / RESTRICT */
+       else if (Matches3("DROP",
+                                         "COLLATION|CONVERSION|DOMAIN|EXTENSION|LANGUAGE|SCHEMA|SEQUENCE|SERVER|TABLE|TYPE|VIEW",
+                                         MatchAny) ||
+                        Matches4("DROP", "ACCESS", "METHOD", MatchAny) ||
+                        (Matches4("DROP", "AGGREGATE|FUNCTION", MatchAny, MatchAny) &&
+                         ends_with(prev_wd, ')')) ||
+                        Matches4("DROP", "EVENT", "TRIGGER", MatchAny) ||
+                        Matches5("DROP", "FOREIGN", "DATA", "WRAPPER", MatchAny) ||
+                        Matches4("DROP", "FOREIGN", "TABLE", MatchAny) ||
+                        Matches5("DROP", "TEXT", "SEARCH", "CONFIGURATION|DICTIONARY|PARSER|TEMPLATE", MatchAny))
+               COMPLETE_WITH_LIST2("CASCADE", "RESTRICT");
+       /* help completing some of the variants */
+       else if (Matches3("DROP", "AGGREGATE|FUNCTION", MatchAny))
                COMPLETE_WITH_CONST("(");
+       else if (Matches4("DROP", "AGGREGATE|FUNCTION", MatchAny, "("))
+               COMPLETE_WITH_FUNCTION_ARG(prev2_wd);
+       else if (Matches2("DROP", "FOREIGN"))
+               COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE");
  
-       /* DROP object with CASCADE / RESTRICT */
-       else if ((pg_strcasecmp(prev3_wd, "DROP") == 0 &&
-                         (pg_strcasecmp(prev2_wd, "COLLATION") == 0 ||
-                          pg_strcasecmp(prev2_wd, "CONVERSION") == 0 ||
-                          pg_strcasecmp(prev2_wd, "DOMAIN") == 0 ||
-                          pg_strcasecmp(prev2_wd, "EXTENSION") == 0 ||
-                          pg_strcasecmp(prev2_wd, "FUNCTION") == 0 ||
-                          pg_strcasecmp(prev2_wd, "INDEX") == 0 ||
-                          pg_strcasecmp(prev2_wd, "LANGUAGE") == 0 ||
-                          pg_strcasecmp(prev2_wd, "SCHEMA") == 0 ||
-                          pg_strcasecmp(prev2_wd, "SEQUENCE") == 0 ||
-                          pg_strcasecmp(prev2_wd, "SERVER") == 0 ||
-                          pg_strcasecmp(prev2_wd, "TABLE") == 0 ||
-                          pg_strcasecmp(prev2_wd, "TYPE") == 0 ||
-                          pg_strcasecmp(prev2_wd, "VIEW") == 0)) ||
-                        (pg_strcasecmp(prev4_wd, "DROP") == 0 &&
-                         pg_strcasecmp(prev3_wd, "AGGREGATE") == 0 &&
-                         prev_wd[strlen(prev_wd) - 1] == ')') ||
-                        (pg_strcasecmp(prev4_wd, "DROP") == 0 &&
-                         pg_strcasecmp(prev3_wd, "EVENT") == 0 &&
-                         pg_strcasecmp(prev2_wd, "TRIGGER") == 0) ||
- #ifndef PGXC
-                       /* PGXCTODO: This should be re-enabled once FOREIGN DATA WRAPPER is supported */
-                        (pg_strcasecmp(prev5_wd, "DROP") == 0 &&
-                         pg_strcasecmp(prev4_wd, "FOREIGN") == 0 &&
-                         pg_strcasecmp(prev3_wd, "DATA") == 0 &&
-                         pg_strcasecmp(prev2_wd, "WRAPPER") == 0) ||
- #endif
-                        (pg_strcasecmp(prev5_wd, "DROP") == 0 &&
-                         pg_strcasecmp(prev4_wd, "TEXT") == 0 &&
-                         pg_strcasecmp(prev3_wd, "SEARCH") == 0 &&
-                         (pg_strcasecmp(prev2_wd, "CONFIGURATION") == 0 ||
-                          pg_strcasecmp(prev2_wd, "DICTIONARY") == 0 ||
-                          pg_strcasecmp(prev2_wd, "PARSER") == 0 ||
-                          pg_strcasecmp(prev2_wd, "TEMPLATE") == 0))
-               )
-       {
-               if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
-                       pg_strcasecmp(prev2_wd, "FUNCTION") == 0)
-               {
-                       COMPLETE_WITH_CONST("(");
-               }
-               else
-               {
-                       static const char *const list_DROPCR[] =
-                       {"CASCADE", "RESTRICT", NULL};
-                       COMPLETE_WITH_LIST(list_DROPCR);
-               }
-       }
- #ifndef PGXC
-       /* PGXCTODO: This should be re-enabled once FOREIGN DATA WRAPPER is supported */
-       else if (pg_strcasecmp(prev2_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev_wd, "FOREIGN") == 0)
-       {
-               static const char *const drop_CREATE_FOREIGN[] =
-               {"DATA WRAPPER", "TABLE", NULL};
-               COMPLETE_WITH_LIST(drop_CREATE_FOREIGN);
-       }
- #endif
+       /* DROP INDEX */
+       else if (Matches2("DROP", "INDEX"))
+               COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes,
+                                                                  " UNION SELECT 'CONCURRENTLY'");
+       else if (Matches3("DROP", "INDEX", "CONCURRENTLY"))
+               COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL);
+       else if (Matches3("DROP", "INDEX", MatchAny))
+               COMPLETE_WITH_LIST2("CASCADE", "RESTRICT");
+       else if (Matches4("DROP", "INDEX", "CONCURRENTLY", MatchAny))
+               COMPLETE_WITH_LIST2("CASCADE", "RESTRICT");
  
        /* DROP MATERIALIZED VIEW */
-       else if (pg_strcasecmp(prev2_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev_wd, "MATERIALIZED") == 0)
-       {
+       else if (Matches2("DROP", "MATERIALIZED"))
                COMPLETE_WITH_CONST("VIEW");
-       }
-       else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev2_wd, "MATERIALIZED") == 0 &&
-                        pg_strcasecmp(prev_wd, "VIEW") == 0)
-       {
+       else if (Matches3("DROP", "MATERIALIZED", "VIEW"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews, NULL);
-       }
  
-       else if (pg_strcasecmp(prev4_wd, "DROP") == 0 &&
-                        (pg_strcasecmp(prev3_wd, "AGGREGATE") == 0 ||
-                         pg_strcasecmp(prev3_wd, "FUNCTION") == 0) &&
-                        pg_strcasecmp(prev_wd, "(") == 0)
-               COMPLETE_WITH_FUNCTION_ARG(prev2_wd);
        /* DROP OWNED BY */
-       else if (pg_strcasecmp(prev2_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev_wd, "OWNED") == 0)
+       else if (Matches2("DROP", "OWNED"))
                COMPLETE_WITH_CONST("BY");
-       else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev2_wd, "OWNED") == 0 &&
-                        pg_strcasecmp(prev_wd, "BY") == 0)
+       else if (Matches3("DROP", "OWNED", "BY"))
                COMPLETE_WITH_QUERY(Query_for_list_of_roles);
-       else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev2_wd, "TEXT") == 0 &&
-                        pg_strcasecmp(prev_wd, "SEARCH") == 0)
-       {
  
-               static const char *const list_ALTERTEXTSEARCH[] =
-               {"CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE", NULL};
-               COMPLETE_WITH_LIST(list_ALTERTEXTSEARCH);
-       }
 +#ifdef PGXC
 +      /* DROP NODE */
-       else if (pg_strcasecmp(prev2_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev_wd, "NODE") == 0)
++      else if (Matches2("DROP", "NODE"))
 +              COMPLETE_WITH_QUERY(Query_for_list_of_available_nodenames);     /* Should test that completion is not confused with DROP NODE GROUP */
 +      /* DROP NODE GROUP */
-       else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev2_wd, "NODE") == 0 &&
-                        pg_strcasecmp(prev_wd, "GROUP") == 0)
++      else if (Matches3("DROP", "NODE", "GROUP"))
 +              COMPLETE_WITH_QUERY(Query_for_list_of_available_nodegroup_names);
 +/* EXECUTE DIRECT */
-       else if (pg_strcasecmp(prev2_wd, "EXECUTE") == 0 &&
-                        pg_strcasecmp(prev_wd, "DIRECT") == 0)
++      else if (Matches2("EXECUTE", "DIRECT"))
 +              COMPLETE_WITH_CONST("ON");
-       else if (pg_strcasecmp(prev3_wd, "EXECUTE") == 0 &&
-                        pg_strcasecmp(prev2_wd, "DIRECT") == 0 &&
-                        pg_strcasecmp(prev_wd, "ON") == 0)
++      else if (Matches3("EXECUTE", "DIRECT", "ON"))
 +              COMPLETE_WITH_QUERY(Query_for_list_of_available_nodenames);
 +#endif
+       else if (Matches3("DROP", "TEXT", "SEARCH"))
+               COMPLETE_WITH_LIST4("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE");
  
        /* DROP TRIGGER */
-       else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev2_wd, "TRIGGER") == 0)
-       {
+       else if (Matches3("DROP", "TRIGGER", MatchAny))
                COMPLETE_WITH_CONST("ON");
-       }
-       else if (pg_strcasecmp(prev4_wd, "DROP") == 0 &&
-                        pg_strcasecmp(prev3_wd, "TRIGGER") == 0 &&
-                        pg_strcasecmp(prev_wd, "ON") == 0)
+       else if (Matches4("DROP", "TRIGGER", MatchAny, "ON"))
        {
                completion_info_charp = prev2_wd;
                COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_trigger);
         * but we may as well tab-complete both: perhaps some users prefer one
         * variant or the other.
         */
-       else if (pg_strcasecmp(prev3_wd, "FETCH") == 0 ||
-                        pg_strcasecmp(prev3_wd, "MOVE") == 0)
-       {
-               static const char *const list_FROMIN[] =
-               {"FROM", "IN", NULL};
-               COMPLETE_WITH_LIST(list_FROMIN);
-       }
+       else if (Matches3("FETCH|MOVE", MatchAny, MatchAny))
+               COMPLETE_WITH_LIST2("FROM", "IN");
  
 +#ifndef PGXC
 +      /* PGXCTODO: This should be re-enabled once FOREIGN DATA WRAPPER is supported */
  /* FOREIGN DATA WRAPPER */
        /* applies in ALTER/DROP FDW and in CREATE SERVER */
-       else if (pg_strcasecmp(prev4_wd, "CREATE") != 0 &&
-                        pg_strcasecmp(prev3_wd, "FOREIGN") == 0 &&
-                        pg_strcasecmp(prev2_wd, "DATA") == 0 &&
-                        pg_strcasecmp(prev_wd, "WRAPPER") == 0)
+       else if (TailMatches3("FOREIGN", "DATA", "WRAPPER") &&
+                        !TailMatches4("CREATE", MatchAny, MatchAny, MatchAny))
                COMPLETE_WITH_QUERY(Query_for_list_of_fdws);
+       /* applies in CREATE SERVER */
+       else if (TailMatches4("FOREIGN", "DATA", "WRAPPER", MatchAny) &&
+                        HeadMatches2("CREATE", "SERVER"))
+               COMPLETE_WITH_CONST("OPTIONS");
  
  /* FOREIGN TABLE */
-       else if (pg_strcasecmp(prev3_wd, "CREATE") != 0 &&
-                        pg_strcasecmp(prev2_wd, "FOREIGN") == 0 &&
-                        pg_strcasecmp(prev_wd, "TABLE") == 0)
+       else if (TailMatches2("FOREIGN", "TABLE") &&
+                        !TailMatches3("CREATE", MatchAny, MatchAny))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_foreign_tables, NULL);
 +#endif
  
- /* GRANT && REVOKE */
+ /* FOREIGN SERVER */
+       else if (TailMatches2("FOREIGN", "SERVER"))
+               COMPLETE_WITH_QUERY(Query_for_list_of_servers);
+ /* GRANT && REVOKE --- is allowed inside CREATE SCHEMA, so use TailMatches */
        /* Complete GRANT/REVOKE with a list of roles and privileges */
-       else if (pg_strcasecmp(prev_wd, "GRANT") == 0 ||
-                        pg_strcasecmp(prev_wd, "REVOKE") == 0)
-       {
+       else if (TailMatches1("GRANT|REVOKE"))
                COMPLETE_WITH_QUERY(Query_for_list_of_roles
                                                        " UNION SELECT 'SELECT'"
                                                        " UNION SELECT 'INSERT'"
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
  
  /* UNLISTEN */
-       else if (pg_strcasecmp(prev_wd, "UNLISTEN") == 0)
+       else if (Matches1("UNLISTEN"))
                COMPLETE_WITH_QUERY("SELECT pg_catalog.quote_ident(channel) FROM pg_catalog.pg_listening_channels() AS channel WHERE substring(pg_catalog.quote_ident(channel),1,%d)='%s' UNION SELECT '*'");
  
- /* UPDATE */
+ /* UPDATE --- can be inside EXPLAIN, RULE, etc */
        /* If prev. word is UPDATE suggest a list of tables */
-       else if (pg_strcasecmp(prev_wd, "UPDATE") == 0)
+       else if (TailMatches1("UPDATE"))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_updatables, NULL);
        /* Complete UPDATE <table> with "SET" */
-       else if (pg_strcasecmp(prev2_wd, "UPDATE") == 0)
+       else if (TailMatches2("UPDATE", MatchAny))
                COMPLETE_WITH_CONST("SET");
-       /*
-        * If the previous word is SET (and it wasn't caught above as the _first_
-        * word) the word before it was (hopefully) a table name and we'll now
-        * make a list of attributes.
-        */
-       else if (pg_strcasecmp(prev_wd, "SET") == 0)
+       /* Complete UPDATE <table> SET with list of attributes */
+       else if (TailMatches3("UPDATE", MatchAny, "SET"))
                COMPLETE_WITH_ATTR(prev2_wd, "");
- /* UPDATE xx SET yy = */
-       else if (pg_strcasecmp(prev2_wd, "SET") == 0 &&
-                        pg_strcasecmp(prev4_wd, "UPDATE") == 0)
+       /* UPDATE <table> SET <attr> = */
+       else if (TailMatches4("UPDATE", MatchAny, "SET", MatchAny))
                COMPLETE_WITH_CONST("=");
  
 +#ifndef PGXC
 +      /* PGXCTODO: This should be re-enabled once USER MAPPING is supported */
  /* USER MAPPING */
-       else if ((pg_strcasecmp(prev3_wd, "ALTER") == 0 ||
-                         pg_strcasecmp(prev3_wd, "CREATE") == 0 ||
-                         pg_strcasecmp(prev3_wd, "DROP") == 0) &&
-                        pg_strcasecmp(prev2_wd, "USER") == 0 &&
-                        pg_strcasecmp(prev_wd, "MAPPING") == 0)
+       else if (Matches3("ALTER|CREATE|DROP", "USER", "MAPPING"))
                COMPLETE_WITH_CONST("FOR");
-       else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
-                        pg_strcasecmp(prev3_wd, "USER") == 0 &&
-                        pg_strcasecmp(prev2_wd, "MAPPING") == 0 &&
-                        pg_strcasecmp(prev_wd, "FOR") == 0)
+       else if (Matches4("CREATE", "USER", "MAPPING", "FOR"))
                COMPLETE_WITH_QUERY(Query_for_list_of_roles
                                                        " UNION SELECT 'CURRENT_USER'"
                                                        " UNION SELECT 'PUBLIC'"
index b4e3f67c6e7f34a5c8d63e12f79092971a8e5729,72b73697a8cbac361673dc1bc6f39711027342a5..84735f36f371819bba9a8c7d5577c634490eecce
@@@ -60,15 -74,8 +74,15 @@@ libpgcommon_srv.a: $(OBJS_SRV
  # their *.o siblings as well, which do have proper dependencies.  It's
  # a hack that might fail someday if there is a *_srv.o without a
  # corresponding *.o, but it works for now.
 +ifeq ($(genmsgids), yes)
 +PGXL_MSG_FILEID := 1
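 +# Each *_srv.o is compiled with its own PGXL_MSG_FILEID; the counter is bumped after every compile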
 +%_srv.o: %.c %.o
 +      $(CC) $(CFLAGS) -DPGXL_MSG_MODULE=$(PGXL_MSG_MODULE) -DPGXL_MSG_FILEID=$(PGXL_MSG_FILEID) $(subst -DFRONTEND ,, $(CPPFLAGS)) -c $< -o $@
 +      $(eval PGXL_MSG_FILEID := $(shell expr $(PGXL_MSG_FILEID) + 1))
 +else
  %_srv.o: %.c %.o
-       $(CC) $(CFLAGS) $(subst -DFRONTEND,, $(CPPFLAGS)) -c $< -o $@
+       $(CC) $(CFLAGS) $(subst -DFRONTEND ,, $(CPPFLAGS)) -c $< -o $@
 +endif
  
  $(OBJS_SRV): | submake-errcodes
  
Simple merge
index d82744d5deaacf48f5914ac57325050688d8f016,cad8951f97d9cea0304291ad1efb8c4180507dc9..e4f1fc38bf24166205c15f878b50f44b9d8f9c4c
@@@ -16,9 -16,10 +16,10 @@@ include $(top_builddir)/src/Makefile.gl
  all: pg_config.h pg_config_ext.h pg_config_os.h
  
  
- # Subdirectories containing headers for server-side dev
- SUBDIRS = access bootstrap catalog commands common datatype executor foreign \
+ # Subdirectories containing installable headers
+ SUBDIRS = access bootstrap catalog commands common datatype \
+       executor fe_utils foreign \
 -      lib libpq mb nodes optimizer parser postmaster regex replication \
 +      lib libpq mb nodes optimizer parser pgxc postmaster regex replication \
        rewrite storage tcop snowball snowball/libstemmer tsearch \
        tsearch/dicts utils port port/atomics port/win32 port/win32_msvc \
        port/win32_msvc/sys port/win32/arpa port/win32/netinet \
Simple merge
index 06720ca8f4b71e9ec954af0a02366391b835f74c,474e99eb07ed98924ce43db7ccfb3d6421a7dbec..01d5a6f92619f67289456560b0df27318ec9b88c
@@@ -4,8 -4,7 +4,8 @@@
   *      POSTGRES heap tuple definitions.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/access/htup.h
index a360eeaaea5942b0bd6538b198a7e1c15a432ed8,a7a0ae224fde4a8496cf8678ac6e7e2e8f5b42e2..77920395c15941002ff5594dc64223d67af63d4e
@@@ -45,6 -45,5 +45,8 @@@ PG_RMGR(RM_SPGIST_ID, "SPGist", spg_red
  PG_RMGR(RM_BRIN_ID, "BRIN", brin_redo, brin_desc, brin_identify, NULL, NULL)
  PG_RMGR(RM_COMMIT_TS_ID, "CommitTs", commit_ts_redo, commit_ts_desc, commit_ts_identify, NULL, NULL)
  PG_RMGR(RM_REPLORIGIN_ID, "ReplicationOrigin", replorigin_redo, replorigin_desc, replorigin_identify, NULL, NULL)
 +#ifdef PGXC
 +PG_RMGR(RM_BARRIER_ID, "Barrier", barrier_redo, barrier_desc, NULL, NULL, NULL) 
 +#endif
+ PG_RMGR(RM_GENERIC_ID, "Generic", generic_redo, generic_desc, generic_identify, NULL, NULL)
+ PG_RMGR(RM_LOGICALMSG_ID, "LogicalMessage", logicalmsg_redo, logicalmsg_desc, logicalmsg_identify, NULL, NULL)
Simple merge
index af2876bfc98356e552789a7ae8d404f25190f599,969eff93795d27333472a7ea99e384ab23a42715..395953a6f13b5247b90c9e28b082ee03ca814f33
@@@ -4,10 -4,8 +4,10 @@@
   *      postgres transaction access method support code
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/access/transam.h
   *
Simple merge
index a8133c73f7d69c18d9d8c8aa6c9858215c8245c4,503ae1b82d7bdc887eb56511fafee351cd6f5521..063c8c2af3a7f4c59d882420903b59e3d4f77c96
@@@ -4,10 -4,8 +4,10 @@@
   *      postgres transaction system definitions
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/access/xact.h
   *
Simple merge
index 66a33ccd74580dae7c86517865a86920411017ec,4438f41cf042eda016a324741eeff1123dc028fe..0daf681a22a72b5b6146f2ca6cbc612b7cbb2fe6
@@@ -4,9 -4,8 +4,9 @@@
   *      include file for the bootstrapping code
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/bootstrap/bootstrap.h
   *
diff --cc src/include/c.h
Simple merge
index 95a7abb350bca805dad31ab3c58a83ecd99c3319,005d58181e20dec9fe40e7505653a9b3464a7b26..b90ffa1c83fa0c2cc1ba834b23e5575d364f9703
@@@ -4,8 -4,7 +4,8 @@@
   *      prototypes for functions in backend/catalog/catalog.c
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/catalog/catalog.h
index 523639ebeba6b0408cd284a0a1ebba60378e8114,09b36c5c78514697d7d5ddb8d1383c6eba4d602c..2aff4968815b8580279836f7ccca72337066f91f
@@@ -4,9 -4,8 +4,9 @@@
   *      Routines to support inter-object dependencies.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/catalog/dependency.h
   *
index 0ca1cd4ac6dbf6b23059af7aed3f6cd5415da518,b80d8d8b21e2db8ca0cedbadab1208bf33559a93..a5f053fc525ee78983b65df3d502744e821db5c4
@@@ -4,9 -4,8 +4,9 @@@
   *      prototypes for functions in backend/catalog/heap.c
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/catalog/heap.h
   *
index cb1afccba57bd9d3ce858f22f7a9933ea1419eb1,ca5eb3d417882ea6bd616dd0c645207cc373afc1..f2d8be7856253ab2093d6df9846a4246306d128f
@@@ -5,9 -5,8 +5,9 @@@
   *      on system catalogs
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/catalog/indexing.h
   *
index 2f5fef31c86f6002d41d53f8794ac999f7d0eef9,eee94d862267440c89c7d296805c14bc5b41a042..3c31dafa9d402b2f4ea8487adaea20c99811415d
@@@ -4,8 -4,7 +4,8 @@@
   *      prototypes for functions in backend/catalog/namespace.c
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/catalog/namespace.h
@@@ -127,10 -125,11 +126,14 @@@ extern bool isAnyTempNamespace(Oid name
  extern bool isOtherTempNamespace(Oid namespaceId);
  extern int    GetTempNamespaceBackendId(Oid namespaceId);
  extern Oid    GetTempToastNamespace(void);
+ extern void GetTempNamespaceState(Oid *tempNamespaceId,
+                                         Oid *tempToastNamespaceId);
+ extern void SetTempNamespaceState(Oid tempNamespaceId,
+                                         Oid tempToastNamespaceId);
  extern void ResetTempTableNamespace(void);
 +#ifdef XCP
 +extern void ForgetTempTableNamespace(void);
 +#endif
  
  extern OverrideSearchPath *GetOverrideSearchPath(MemoryContext context);
  extern OverrideSearchPath *CopyOverrideSearchPath(OverrideSearchPath *path);
index e75ed80311ac167303ee6d657d14b432db9368a2,8865bba0103f00e3661a84e0dd79f09dd8b26a98..0be4c0aa8917291c770259a421e251ed424238dd
@@@ -5,8 -5,7 +5,8 @@@
   *      along with the relation's initial contents.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/catalog/pg_aggregate.h
   *    aggkind                         aggregate kind, see AGGKIND_ categories below
   *    aggnumdirectargs        number of arguments that are "direct" arguments
   *    aggtransfn                      transition function
 +#ifdef PGXC
 + *    aggcollectfn            collection function
 +#endif
   *    aggfinalfn                      final function (0 if none)
+  *    aggcombinefn            combine function (0 if none)
+  *    aggserialfn                     function to convert transtype to bytea (0 if none)
+  *    aggdeserialfn           function to convert bytea to transtype (0 if none)
   *    aggmtransfn                     forward function for moving-aggregate mode (0 if none)
   *    aggminvtransfn          inverse function for moving-aggregate mode (0 if none)
   *    aggmfinalfn                     final function for moving-aggregate mode (0 if none)
@@@ -70,8 -58,10 +73,11 @@@ CATALOG(pg_aggregate,2600) BKI_WITHOUT_
        char            aggkind;
        int16           aggnumdirectargs;
        regproc         aggtransfn;
 +      regproc         aggcollectfn; /* PGXC */
        regproc         aggfinalfn;
+       regproc         aggcombinefn;
+       regproc         aggserialfn;
+       regproc         aggdeserialfn;
        regproc         aggmtransfn;
        regproc         aggminvtransfn;
        regproc         aggmfinalfn;
@@@ -108,22 -96,22 +114,25 @@@ typedef FormData_pg_aggregate *Form_pg_
  #define Anum_pg_aggregate_aggkind                     2
  #define Anum_pg_aggregate_aggnumdirectargs    3
  #define Anum_pg_aggregate_aggtransfn          4
 -#define Anum_pg_aggregate_aggfinalfn          5
 -#define Anum_pg_aggregate_aggcombinefn                6
 -#define Anum_pg_aggregate_aggserialfn         7
 -#define Anum_pg_aggregate_aggdeserialfn               8
 -#define Anum_pg_aggregate_aggmtransfn         9
 -#define Anum_pg_aggregate_aggminvtransfn      10
 -#define Anum_pg_aggregate_aggmfinalfn         11
 -#define Anum_pg_aggregate_aggfinalextra               12
 -#define Anum_pg_aggregate_aggmfinalextra      13
 -#define Anum_pg_aggregate_aggsortop                   14
 -#define Anum_pg_aggregate_aggtranstype                15
 -#define Anum_pg_aggregate_aggtransspace               16
 -#define Anum_pg_aggregate_aggmtranstype               17
 -#define Anum_pg_aggregate_aggmtransspace      18
 -#define Anum_pg_aggregate_agginitval          19
 -#define Anum_pg_aggregate_aggminitval         20
 +#define Anum_pg_aggregate_aggcollectfn                5
 +#define Anum_pg_aggregate_aggfinalfn          6
- #define Anum_pg_aggregate_aggmtransfn         7
- #define Anum_pg_aggregate_aggminvtransfn      8
- #define Anum_pg_aggregate_aggmfinalfn         9
- #define Anum_pg_aggregate_aggfinalextra               10
- #define Anum_pg_aggregate_aggmfinalextra      11
- #define Anum_pg_aggregate_aggsortop                   12
- #define Anum_pg_aggregate_aggtranstype                13
- #define Anum_pg_aggregate_aggcollecttype      14
- #define Anum_pg_aggregate_aggtransspace               15
- #define Anum_pg_aggregate_aggmtranstype               16
- #define Anum_pg_aggregate_aggmtransspace      17
- #define Anum_pg_aggregate_agginitval          18
- #define Anum_pg_aggregate_agginitcollect      19
- #define Anum_pg_aggregate_aggminitval         20
++#define Anum_pg_aggregate_aggcombinefn                7
++#define Anum_pg_aggregate_aggserialfn         8
++#define Anum_pg_aggregate_aggdeserialfn               9
++#define Anum_pg_aggregate_aggmtransfn         10
++#define Anum_pg_aggregate_aggminvtransfn      11
++#define Anum_pg_aggregate_aggmfinalfn         12
++#define Anum_pg_aggregate_aggfinalextra               13
++#define Anum_pg_aggregate_aggmfinalextra      14
++#define Anum_pg_aggregate_aggsortop                   15
++#define Anum_pg_aggregate_aggtranstype                16
++#define Anum_pg_aggregate_aggcollecttype      17
++#define Anum_pg_aggregate_aggtransspace               18
++#define Anum_pg_aggregate_aggmtranstype               19
++#define Anum_pg_aggregate_aggmtransspace      20
++#define Anum_pg_aggregate_agginitval          21
++#define Anum_pg_aggregate_agginitcollect      22
++#define Anum_pg_aggregate_aggminitval         23
  
  /*
   * Symbolic values for aggkind column.  We distinguish normal aggregates
   */
  
  /* avg */
- DATA(insert ( 2100    n 0 int8_avg_accum      numeric_poly_collect    numeric_poly_avg int8_avg_accum int8_avg_accum_inv      numeric_poly_avg                f f 0   7019 7019       128 7019        128 _null_ _null_       _null_ ));
- DATA(insert ( 2101    n 0 int4_avg_accum      int8_avg_collect        int8_avg int4_avg_accum int4_avg_accum_inv      int8_avg        f f 0   1016 1016       0       1016    0       "{0,0}" "{0,0}" "{0,0}" ));
- DATA(insert ( 2102    n 0 int2_avg_accum      int8_avg_collect        int8_avg int2_avg_accum int2_avg_accum_inv      int8_avg        f f 0   1016 1016       0       1016    0       "{0,0}" "{0,0}" "{0,0}" ));
- DATA(insert ( 2103    n 0 numeric_avg_accum numeric_collect   numeric_avg     numeric_avg_accum numeric_accum_inv numeric_avg f f 0   7018    7018    128 7018        128 _null_ _null_       _null_ ));
- DATA(insert ( 2104    n 0 float4_accum        float8_collect  float8_avg              -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}"     _null_ ));
- DATA(insert ( 2105    n 0 float8_accum        float8_collect  float8_avg              -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}"     _null_ ));
- DATA(insert ( 2106    n 0 interval_accum      interval_collect        interval_avg    interval_accum  interval_accum_inv interval_avg f f 0   1187    1187    0       1187    0       "{0 second,0 second}" "{0 second,0 second}"     "{0 second,0 second}" ));
 -DATA(insert ( 2100    n 0 int8_avg_accum              numeric_poly_avg        int8_avg_combine        int8_avg_serialize              int8_avg_deserialize    int8_avg_accum  int8_avg_accum_inv      numeric_poly_avg        f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2101    n 0 int4_avg_accum              int8_avg                        int4_avg_combine        -                                               -                                               int4_avg_accum  int4_avg_accum_inv      int8_avg                        f f 0   1016    0       1016    0       "{0,0}" "{0,0}" ));
 -DATA(insert ( 2102    n 0 int2_avg_accum              int8_avg                        int4_avg_combine        -                                               -                                               int2_avg_accum  int2_avg_accum_inv      int8_avg                        f f 0   1016    0       1016    0       "{0,0}" "{0,0}" ));
 -DATA(insert ( 2103    n 0 numeric_avg_accum   numeric_avg                     numeric_avg_combine numeric_avg_serialize       numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_avg                 f f 0   2281    128 2281        128 _null_ _null_ ));
 -DATA(insert ( 2104    n 0 float4_accum                float8_avg                      float8_combine          -                                               -                                               -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2105    n 0 float8_accum                float8_avg                      float8_combine          -                                               -                                               -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2106    n 0 interval_accum              interval_avg            interval_combine        -                                               -                                               interval_accum  interval_accum_inv      interval_avg            f f 0   1187    0       1187    0       "{0 second,0 second}" "{0 second,0 second}" ));
++DATA(insert ( 2100    n 0 int8_avg_accum      numeric_poly_collect    numeric_poly_avg int8_avg_combine       int8_avg_serialize      int8_avg_deserialize    int8_avg_accum  int8_avg_accum_inv      numeric_poly_avg                f f 0   7019 7019       128 7019        128 _null_ _null_       _null_ ));
++DATA(insert ( 2101    n 0 int4_avg_accum      int8_avg_collect        int8_avg int4_avg_combine       -       -       int4_avg_accum  int4_avg_accum_inv      int8_avg        f f 0   1016 1016       0       1016    0       "{0,0}" "{0,0}" "{0,0}" ));
++DATA(insert ( 2102    n 0 int2_avg_accum      int8_avg_collect        int8_avg int4_avg_combine       -       -       int2_avg_accum  int2_avg_accum_inv      int8_avg        f f 0   1016 1016       0       1016    0       "{0,0}" "{0,0}" "{0,0}" ));
++DATA(insert ( 2103    n 0 numeric_avg_accum numeric_collect   numeric_avg     numeric_avg_combine     numeric_avg_serialize   numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_avg f f 0   7018    7018    128 7018        128 _null_ _null_       _null_ ));
++DATA(insert ( 2104    n 0 float4_accum        float8_collect  float8_avg              float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}"     _null_ ));
++DATA(insert ( 2105    n 0 float8_accum        float8_collect  float8_avg              float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}"     _null_ ));
++DATA(insert ( 2106    n 0 interval_accum      interval_collect        interval_avg    interval_combine        -       -       interval_accum  interval_accum_inv interval_avg f f 0   1187    1187    0       1187    0       "{0 second,0 second}" "{0 second,0 second}"     "{0 second,0 second}" ));
  
  /* sum */
- DATA(insert ( 2107    n 0 int8_avg_accum      numeric_poly_collect    numeric_poly_sum int8_avg_accum int8_avg_accum_inv      numeric_poly_sum                f f 0   7019    7019    128 7019        128 _null_      _null_  _null_ ));
- DATA(insert ( 2108    n 0 int4_sum            int8_sum_to_int8        -                               int4_avg_accum  int4_avg_accum_inv int2int4_sum f f 0   20      20      0       1016    0       _null_  _null_  "{0,0}" ));
- DATA(insert ( 2109    n 0 int2_sum            int8_sum_to_int8        -                               int2_avg_accum  int2_avg_accum_inv int2int4_sum f f 0   20      20      0       1016    0       _null_  _null_  "{0,0}" ));
- DATA(insert ( 2110    n 0 float4pl            float4pl        -                               -                               -                               -                               f f 0   700             700     0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2111    n 0 float8pl            float8pl        -                               -                               -                               -                               f f 0   701             701     0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2112    n 0 cash_pl                     cash_pl -                               cash_pl cash_mi -                               f f 0   790             790     0       790             0       _null_  _null_  _null_ ));
- DATA(insert ( 2113    n 0 interval_pl         interval_pl     -                               interval_pl             interval_mi             -                               f f 0   1186    1186    0       1186    0       _null_  _null_  _null_ ));
- DATA(insert ( 2114    n 0 numeric_avg_accum   numeric_collect numeric_sum numeric_avg_accum numeric_accum_inv numeric_sum f f 0       7018    7018    128 7018        128 _null_      _null_  _null_ ));
 -DATA(insert ( 2107    n 0 int8_avg_accum              numeric_poly_sum        int8_avg_combine        int8_avg_serialize              int8_avg_deserialize    int8_avg_accum  int8_avg_accum_inv      numeric_poly_sum        f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2108    n 0 int4_sum                    -                                       int8pl                          -                                               -                                               int4_avg_accum  int4_avg_accum_inv      int2int4_sum            f f 0   20              0       1016    0       _null_ "{0,0}" ));
 -DATA(insert ( 2109    n 0 int2_sum                    -                                       int8pl                          -                                               -                                               int2_avg_accum  int2_avg_accum_inv      int2int4_sum            f f 0   20              0       1016    0       _null_ "{0,0}" ));
 -DATA(insert ( 2110    n 0 float4pl                    -                                       float4pl                        -                                               -                                               -                               -                                       -                                       f f 0   700             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2111    n 0 float8pl                    -                                       float8pl                        -                                               -                                               -                               -                                       -                                       f f 0   701             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2112    n 0 cash_pl                             -                                       cash_pl                         -                                               -                                               cash_pl                 cash_mi                         -                                       f f 0   790             0       790             0       _null_ _null_ ));
 -DATA(insert ( 2113    n 0 interval_pl                 -                                       interval_pl                     -                                               -                                               interval_pl             interval_mi                     -                                       f f 0   1186    0       1186    0       _null_ _null_ ));
 -DATA(insert ( 2114    n 0 numeric_avg_accum   numeric_sum                     numeric_avg_combine numeric_avg_serialize       numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_sum                 f f 0   2281    128 2281        128 _null_ _null_ ));
++DATA(insert ( 2107    n 0 int8_avg_accum      numeric_poly_collect    numeric_poly_sum int8_avg_combine       int8_avg_serialize      int8_avg_deserialize    int8_avg_accum  int8_avg_accum_inv      numeric_poly_sum                f f 0   7019    7019    128 7019        128 _null_      _null_  _null_ ));
++DATA(insert ( 2108    n 0 int4_sum            int8_sum_to_int8        -                               int8pl  -       -       int4_avg_accum  int4_avg_accum_inv int2int4_sum f f 0   20      20      0       1016    0       _null_  _null_  "{0,0}" ));
++DATA(insert ( 2109    n 0 int2_sum            int8_sum_to_int8        -                               int8pl  -       -       int2_avg_accum  int2_avg_accum_inv int2int4_sum f f 0   20      20      0       1016    0       _null_  _null_  "{0,0}" ));
++DATA(insert ( 2110    n 0 float4pl            float4pl        -                               float4pl        -       -       -                               -                               -                               f f 0   700             700     0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2111    n 0 float8pl            float8pl        -                               float8pl        -       -       -                               -                               -                               f f 0   701             701     0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2112    n 0 cash_pl                     cash_pl -                               cash_pl -       -       cash_pl cash_mi -                               f f 0   790             790     0       790             0       _null_  _null_  _null_ ));
++DATA(insert ( 2113    n 0 interval_pl         interval_pl     -                               interval_pl     -       -       interval_pl             interval_mi             -                               f f 0   1186    1186    0       1186    0       _null_  _null_  _null_ ));
++DATA(insert ( 2114    n 0 numeric_avg_accum   numeric_collect numeric_sum numeric_avg_combine numeric_avg_serialize   numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_sum f f 0   7018    7018    128 7018        128 _null_      _null_  _null_ ));
  
  /* max */
- DATA(insert ( 2115    n 0 int8larger          int8larger      -                               -                               -                               -                               f f 413         20              20      0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2116    n 0 int4larger          int4larger      -                               -                               -                               -                               f f 521         23              23      0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2117    n 0 int2larger          int2larger      -                               -                               -                               -                               f f 520         21              21      0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2118    n 0 oidlarger           oidlarger       -                               -                               -                               -                               f f 610         26              26      0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2119    n 0 float4larger        float4larger    -                               -                               -                               -                               f f 623         700             700     0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2120    n 0 float8larger        float8larger    -                               -                               -                               -                               f f 674         701             701     0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2121    n 0 int4larger          int4larger      -                               -                               -                               -                               f f 563         702             702     0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2122    n 0 date_larger         date_larger     -                               -                               -                               -                               f f 1097        1082    1082    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2123    n 0 time_larger         time_larger     -                               -                               -                               -                               f f 1112        1083    1083    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2124    n 0 timetz_larger       timetz_larger   -                               -                               -                               -                               f f 1554        1266    1266    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2125    n 0 cashlarger          cashlarger      -                               -                               -                               -                               f f 903         790             790     0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2126    n 0 timestamp_larger    timestamp_larger        -                       -                               -                               -                               f f 2064        1114    1114    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2127    n 0 timestamptz_larger  timestamptz_larger      -                       -                               -                               -                               f f 1324        1184    1184    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2128    n 0 interval_larger interval_larger     -                               -                               -                               -                               f f 1334        1186    1186    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2129    n 0 text_larger         text_larger     -                               -                               -                               -                               f f 666         25              25      0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2130    n 0 numeric_larger      numeric_larger  -                               -                               -                               -                               f f 1756        1700    1700    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2050    n 0 array_larger        array_larger    -                               -                               -                               -                               f f 1073        2277    2277    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2244    n 0 bpchar_larger       bpchar_larger   -                               -                               -                               -                               f f 1060        1042    1042    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 2797    n 0 tidlarger           tidlarger       -                               -                               -                               -                               f f 2800        27              27      0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 3526    n 0 enum_larger         enum_larger     -                               -                               -                               -                               f f 3519        3500    3500    0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 3564    n 0 network_larger      network_larger -                                -                       -                               -                               f f 1205        869             869             0       0               0       _null_  _null_ _null_ ));
 -DATA(insert ( 2115    n 0 int8larger          -                               int8larger                      -       -       -                               -                               -                               f f 413         20              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2116    n 0 int4larger          -                               int4larger                      -       -       -                               -                               -                               f f 521         23              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2117    n 0 int2larger          -                               int2larger                      -       -       -                               -                               -                               f f 520         21              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2118    n 0 oidlarger           -                               oidlarger                       -       -       -                               -                               -                               f f 610         26              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2119    n 0 float4larger        -                               float4larger            -       -       -                               -                               -                               f f 623         700             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2120    n 0 float8larger        -                               float8larger            -       -       -                               -                               -                               f f 674         701             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2121    n 0 int4larger          -                               int4larger                      -       -       -                               -                               -                               f f 563         702             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2122    n 0 date_larger         -                               date_larger                     -       -       -                               -                               -                               f f 1097        1082    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2123    n 0 time_larger         -                               time_larger                     -       -       -                               -                               -                               f f 1112        1083    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2124    n 0 timetz_larger       -                               timetz_larger           -       -       -                               -                               -                               f f 1554        1266    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2125    n 0 cashlarger          -                               cashlarger                      -       -       -                               -                               -                               f f 903         790             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2126    n 0 timestamp_larger    -                       timestamp_larger        -       -       -                               -                               -                               f f 2064        1114    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2127    n 0 timestamptz_larger  -                       timestamptz_larger      -       -       -                               -                               -                               f f 1324        1184    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2128    n 0 interval_larger -                           interval_larger         -       -       -                               -                               -                               f f 1334        1186    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2129    n 0 text_larger         -                               text_larger                     -       -       -                               -                               -                               f f 666         25              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2130    n 0 numeric_larger      -                               numeric_larger          -       -       -                               -                               -                               f f 1756        1700    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2050    n 0 array_larger        -                               array_larger            -       -       -                               -                               -                               f f 1073        2277    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2244    n 0 bpchar_larger       -                               bpchar_larger           -       -       -                               -                               -                               f f 1060        1042    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2797    n 0 tidlarger           -                               tidlarger                       -       -       -                               -                               -                               f f 2800        27              0       0               0       _null_ _null_ ));
 -DATA(insert ( 3526    n 0 enum_larger         -                               enum_larger                     -       -       -                               -                               -                               f f 3519        3500    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3564    n 0 network_larger      -                               network_larger          -       -       -                               -                               -                               f f 1205        869             0       0               0       _null_ _null_ ));
++DATA(insert ( 2115    n 0 int8larger          int8larger      -       int8larger      -       -                               -                               -                               -                               f f 413         20              20      0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2116    n 0 int4larger          int4larger      -       int4larger      -       -                               -                               -                               -                               f f 521         23              23      0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2117    n 0 int2larger          int2larger      -       int2larger      -       -                       -                               -                               -                               f f 520         21              21      0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2118    n 0 oidlarger           oidlarger       -       oidlarger       -       -                       -                               -                               -                               f f 610         26              26      0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2119    n 0 float4larger        float4larger    -       float4larger    -       -                       -                               -                               -                               f f 623         700             700     0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2120    n 0 float8larger        float8larger    -       float8larger    -       -                       -                               -                               -                               f f 674         701             701     0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2121    n 0 int4larger          int4larger      -               int4larger      -       -               -                               -                               -                               f f 563         702             702     0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2122    n 0 date_larger         date_larger     -               date_larger     -       -               -                               -                               -                               f f 1097        1082    1082    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2123    n 0 time_larger         time_larger     -               time_larger     -       -               -                               -                               -                               f f 1112        1083    1083    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2124    n 0 timetz_larger       timetz_larger   -       timetz_larger   -       -                       -                               -                               -                               f f 1554        1266    1266    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2125    n 0 cashlarger          cashlarger      -               cashlarger      -       -               -                               -                               -                               f f 903         790             790     0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2126    n 0 timestamp_larger    timestamp_larger        -       timestamp_larger        -       -                       -                               -                               -                               f f 2064        1114    1114    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2127    n 0 timestamptz_larger  timestamptz_larger      -       timestamptz_larger      -       -                       -                               -                               -                               f f 1324        1184    1184    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2128    n 0 interval_larger interval_larger     -       interval_larger -       -                               -                               -                               -                               f f 1334        1186    1186    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2129    n 0 text_larger         text_larger     -               text_larger     -       -                       -                               -                               -                               f f 666         25              25      0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2130    n 0 numeric_larger      numeric_larger  -       numeric_larger  -       -                               -                               -                               -                               f f 1756        1700    1700    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2050    n 0 array_larger        array_larger    -       array_larger    -       -                               -                               -                               -                               f f 1073        2277    2277    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2244    n 0 bpchar_larger       bpchar_larger   -       bpchar_larger   -       -                               -                               -                               -                               f f 1060        1042    1042    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 2797    n 0 tidlarger           tidlarger       -               tidlarger       -       -               -                               -                               -                               f f 2800        27              27      0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 3526    n 0 enum_larger         enum_larger     -               enum_larger     -       -                       -                               -                               -                               f f 3519        3500    3500    0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 3564    n 0 network_larger      network_larger -        network_larger  -       -                               -                       -                               -                               f f 1205        869             869             0       0               0       _null_  _null_ _null_ ));
  
  /* min */
- DATA(insert ( 2131    n 0 int8smaller         int8smaller     -                               -                               -                               -                               f f 412         20              20              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2132    n 0 int4smaller         int4smaller     -                               -                               -                               -                               f f 97          23              23              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2133    n 0 int2smaller         int2smaller     -                               -                               -                               -                               f f 95          21              21              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2134    n 0 oidsmaller          oidsmaller      -                               -                               -                               -                               f f 609         26              26              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2135    n 0 float4smaller       float4smaller   -                               -                               -                               -                               f f 622         700             700             0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2136    n 0 float8smaller       float8smaller   -                               -                               -                               -                               f f 672         701             701             0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2137    n 0 int4smaller         int4smaller     -                               -                               -                               -                               f f 562         702             702             0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2138    n 0 date_smaller        date_smaller    -                               -                               -                               -                               f f 1095        1082    1082    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2139    n 0 time_smaller        time_smaller    -                               -                               -                               -                               f f 1110        1083    1083    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2140    n 0 timetz_smaller      timetz_smaller  -                               -                               -                               -                               f f 1552        1266    1266    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2141    n 0 cashsmaller         cashsmaller     -                               -                               -                               -                               f f 902         790             790             0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2142    n 0 timestamp_smaller   timestamp_smaller       -                       -                               -                               -                               f f 2062        1114    1114    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2143    n 0 timestamptz_smaller timestamptz_smaller     -                       -                               -                               -                               f f 1322        1184    1184    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2144    n 0 interval_smaller    interval_smaller        -                       -                               -                               -                               f f 1332        1186    1186    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2145    n 0 text_smaller        text_smaller    -                               -                               -                               -                               f f 664         25              25              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2146    n 0 numeric_smaller numeric_smaller     -                               -                               -                               -                               f f 1754        1700    1700    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2051    n 0 array_smaller       array_smaller   -                               -                               -                               -                               f f 1072        2277    2277    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2245    n 0 bpchar_smaller      bpchar_smaller  -                               -                               -                               -                               f f 1058        1042    1042    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2798    n 0 tidsmaller          tidsmaller      -                               -                               -                               -                               f f 2799        27              27              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3527    n 0 enum_smaller        enum_smaller    -                               -                               -                               -                               f f 3518        3500    3500    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3565    n 0 network_smaller network_smaller     -                               -                               -                               -                               f f 1203        869             869             0       0               0       _null_ _null_ _null_ ));
 -DATA(insert ( 2131    n 0 int8smaller         -                               int8smaller                     -       -       -                               -                               -                               f f 412         20              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2132    n 0 int4smaller         -                               int4smaller                     -       -       -                               -                               -                               f f 97          23              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2133    n 0 int2smaller         -                               int2smaller                     -       -       -                               -                               -                               f f 95          21              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2134    n 0 oidsmaller          -                               oidsmaller                      -       -       -                               -                               -                               f f 609         26              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2135    n 0 float4smaller       -                               float4smaller           -       -       -                               -                               -                               f f 622         700             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2136    n 0 float8smaller       -                               float8smaller           -       -       -                               -                               -                               f f 672         701             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2137    n 0 int4smaller         -                               int4smaller                     -       -       -                               -                               -                               f f 562         702             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2138    n 0 date_smaller        -                               date_smaller            -       -       -                               -                               -                               f f 1095        1082    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2139    n 0 time_smaller        -                               time_smaller            -       -       -                               -                               -                               f f 1110        1083    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2140    n 0 timetz_smaller      -                               timetz_smaller          -       -       -                               -                               -                               f f 1552        1266    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2141    n 0 cashsmaller         -                               cashsmaller                     -       -       -                               -                               -                               f f 902         790             0       0               0       _null_ _null_ ));
 -DATA(insert ( 2142    n 0 timestamp_smaller   -                       timestamp_smaller       -       -       -                               -                               -                               f f 2062        1114    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2143    n 0 timestamptz_smaller -                       timestamptz_smaller -   -       -                               -                               -                               f f 1322        1184    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2144    n 0 interval_smaller    -                       interval_smaller        -       -       -                               -                               -                               f f 1332        1186    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2145    n 0 text_smaller        -                               text_smaller            -       -       -                               -                               -                               f f 664         25              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2146    n 0 numeric_smaller -                           numeric_smaller         -       -       -                               -                               -                               f f 1754        1700    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2051    n 0 array_smaller       -                               array_smaller           -       -       -                               -                               -                               f f 1072        2277    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2245    n 0 bpchar_smaller      -                               bpchar_smaller          -       -       -                               -                               -                               f f 1058        1042    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2798    n 0 tidsmaller          -                               tidsmaller                      -       -       -                               -                               -                               f f 2799        27              0       0               0       _null_ _null_ ));
 -DATA(insert ( 3527    n 0 enum_smaller        -                               enum_smaller            -       -       -                               -                               -                               f f 3518        3500    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3565    n 0 network_smaller -                           network_smaller         -       -       -                               -                               -                               f f 1203        869             0       0               0       _null_ _null_ ));
++DATA(insert ( 2131    n 0 int8smaller         int8smaller     -               int8smaller     -       -                       -                               -                               -                               f f 412         20              20              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2132    n 0 int4smaller         int4smaller     -               int4smaller     -       -                       -                               -                               -                               f f 97          23              23              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2133    n 0 int2smaller         int2smaller     -               int2smaller     -       -                       -                               -                               -                               f f 95          21              21              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2134    n 0 oidsmaller          oidsmaller      -               oidsmaller      -       -                       -                               -                               -                               f f 609         26              26              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2135    n 0 float4smaller       float4smaller   -       float4smaller   -       -                       -                               -                               -                               f f 622         700             700             0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2136    n 0 float8smaller       float8smaller   -       float8smaller   -       -                               -                               -                               -                               f f 672         701             701             0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2137    n 0 int4smaller         int4smaller     -               int4smaller     -       -                       -                               -                               -                               f f 562         702             702             0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2138    n 0 date_smaller        date_smaller    -       date_smaller    -       -                               -                               -                               -                               f f 1095        1082    1082    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2139    n 0 time_smaller        time_smaller    -       time_smaller    -       -                               -                               -                               -                               f f 1110        1083    1083    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2140    n 0 timetz_smaller      timetz_smaller  -       timetz_smaller  -       -                               -                               -                               -                               f f 1552        1266    1266    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2141    n 0 cashsmaller         cashsmaller     -       cashsmaller     -       -                               -                               -                               -                               f f 902         790             790             0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2142    n 0 timestamp_smaller   timestamp_smaller       -       timestamp_smaller       -       -                       -                               -                               -                               f f 2062        1114    1114    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2143    n 0 timestamptz_smaller timestamptz_smaller     -       timestamptz_smaller     -       -                       -                               -                               -                               f f 1322        1184    1184    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2144    n 0 interval_smaller    interval_smaller        -       interval_smaller        -       -                       -                               -                               -                               f f 1332        1186    1186    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2145    n 0 text_smaller        text_smaller    -       text_smaller    -       -                               -                               -                               -                               f f 664         25              25              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2146    n 0 numeric_smaller numeric_smaller     -       numeric_smaller -       -                               -                               -                               -                               f f 1754        1700    1700    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2051    n 0 array_smaller       array_smaller   -       array_smaller   -       -                               -                               -                               -                               f f 1072        2277    2277    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2245    n 0 bpchar_smaller      bpchar_smaller  -       bpchar_smaller  -       -                       -                               -                               -                               f f 1058        1042    1042    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2798    n 0 tidsmaller          tidsmaller      -               tidsmaller      -       -                       -                               -                               -                               f f 2799        27              27              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3527    n 0 enum_smaller        enum_smaller    -       enum_smaller    -       -                               -                               -                               -                               f f 3518        3500    3500    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3565    n 0 network_smaller network_smaller     -       network_smaller -       -                               -                               -                               -                               f f 1203        869             869             0       0               0       _null_ _null_ _null_ ));
  
  /* count */
- DATA(insert ( 2147    n 0 int8inc_any         int8_sum_to_int8        -                               int8inc_any             int8dec_any             -                               f f 0           20              20      0       20              0       "0" _null_      "0" ));
- DATA(insert ( 2803    n 0 int8inc                     int8_sum_to_int8        -                               int8inc                 int8dec                 -                               f f 0           20              20      0       20              0       "0" _null_      "0" ));
 -DATA(insert ( 2147    n 0 int8inc_any         -                               int8pl  -       -       int8inc_any             int8dec_any             -                               f f 0           20              0       20              0       "0" "0" ));
 -DATA(insert ( 2803    n 0 int8inc                     -                               int8pl  -       -       int8inc                 int8dec                 -                               f f 0           20              0       20              0       "0" "0" ));
++DATA(insert ( 2147    n 0 int8inc_any         int8_sum_to_int8        -       int8pl  -       -                       int8inc_any             int8dec_any             -                               f f 0           20              20      0       20              0       "0" _null_      "0" ));
++DATA(insert ( 2803    n 0 int8inc                     int8_sum_to_int8        -       int8pl  -       -                       int8inc                 int8dec                 -                               f f 0           20              20      0       20              0       "0" _null_      "0" ));
  
  /* var_pop */
- DATA(insert ( 2718    n 0 int8_accum  numeric_collect numeric_var_pop         int8_accum              int8_accum_inv  numeric_var_pop f f 0   7018    7018    128 7018        128 _null_      _null_  _null_ ));
- DATA(insert ( 2719    n 0 int4_accum  numeric_poly_collect    numeric_poly_var_pop int4_accum         int4_accum_inv  numeric_poly_var_pop f f 0      7019    7019    128 7019        128 _null_      _null_  _null_ ));
- DATA(insert ( 2720    n 0 int2_accum  numeric_poly_collect    numeric_poly_var_pop int2_accum         int2_accum_inv  numeric_poly_var_pop f f 0      7019    7019    128 7019        128 _null_      _null_  _null_ ));
- DATA(insert ( 2721    n 0 float4_accum        float8_collect  float8_var_pop  -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}"       "{0,0,0}"       _null_ ));
- DATA(insert ( 2722    n 0 float8_accum        float8_collect  float8_var_pop  -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}"       "{0,0,0}"       _null_ ));
- DATA(insert ( 2723    n 0 numeric_accum       numeric_collect numeric_var_pop numeric_accum numeric_accum_inv numeric_var_pop f f 0   7018    7018    128 7018        128 _null_      _null_  _null_ ));
 -DATA(insert ( 2718    n 0 int8_accum          numeric_var_pop                 numeric_combine                 numeric_serialize               numeric_deserialize                     int8_accum              int8_accum_inv  numeric_var_pop                 f f 0   2281    128 2281        128 _null_ _null_ ));
 -DATA(insert ( 2719    n 0 int4_accum          numeric_poly_var_pop    numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int4_accum              int4_accum_inv  numeric_poly_var_pop    f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2720    n 0 int2_accum          numeric_poly_var_pop    numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int2_accum              int2_accum_inv  numeric_poly_var_pop    f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2721    n 0 float4_accum        float8_var_pop                  float8_combine                  -                                               -                                                       -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2722    n 0 float8_accum        float8_var_pop                  float8_combine                  -                                               -                                                       -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2723    n 0 numeric_accum       numeric_var_pop                 numeric_combine                 numeric_serialize               numeric_deserialize                     numeric_accum   numeric_accum_inv numeric_var_pop               f f 0   2281    128 2281        128 _null_ _null_ ));
++DATA(insert ( 2718    n 0 int8_accum  numeric_collect numeric_var_pop         numeric_combine numeric_serialize       numeric_deserialize     int8_accum              int8_accum_inv  numeric_var_pop f f 0   7018    7018    128 7018        128 _null_      _null_  _null_ ));
++DATA(insert ( 2719    n 0 int4_accum  numeric_poly_collect    numeric_poly_var_pop numeric_poly_combine       numeric_poly_serialize  numeric_poly_deserialize        int4_accum              int4_accum_inv  numeric_poly_var_pop f f 0      7019    7019    128 7019        128 _null_      _null_  _null_ ));
++DATA(insert ( 2720    n 0 int2_accum  numeric_poly_collect    numeric_poly_var_pop numeric_poly_combine   numeric_poly_serialize  numeric_poly_deserialize    int2_accum              int2_accum_inv  numeric_poly_var_pop f f 0      7019    7019    128 7019        128 _null_      _null_  _null_ ));
++DATA(insert ( 2721    n 0 float4_accum        float8_collect  float8_var_pop  float8_combine  -       -               -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}"       "{0,0,0}"       _null_ ));
++DATA(insert ( 2722    n 0 float8_accum        float8_collect  float8_var_pop  float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}"       "{0,0,0}"       _null_ ));
++DATA(insert ( 2723    n 0 numeric_accum       numeric_collect numeric_var_pop numeric_combine numeric_serialize       numeric_deserialize     numeric_accum numeric_accum_inv numeric_var_pop f f 0   7018    7018    128 7018        128 _null_      _null_  _null_ ));
  
  /* var_samp */
- DATA(insert ( 2641    n 0 int8_accum          numeric_collect numeric_var_samp        int8_accum              int8_accum_inv  numeric_var_samp f f 0  7018    7018    128 7018        128 _null_      _null_  _null_ ));
- DATA(insert ( 2642    n 0 int4_accum          numeric_poly_collect    numeric_poly_var_samp int4_accum                int4_accum_inv  numeric_poly_var_samp f f 0     7019    7019    128 7019        128 _null_      _null_  _null_ ));
- DATA(insert ( 2643    n 0 int2_accum          numeric_poly_collect    numeric_poly_var_samp int2_accum                int2_accum_inv  numeric_poly_var_samp f f 0     7019    7019    128 7019        128 _null_      _null_  _null_ ));
- DATA(insert ( 2644    n 0 float4_accum        float8_collect  float8_var_samp -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}"     _null_ ));
- DATA(insert ( 2645    n 0 float8_accum        float8_collect  float8_var_samp -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}"     _null_ ));
- DATA(insert ( 2646    n 0 numeric_accum       numeric_collect numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp f f 0 7018    7018    128 7018        128 _null_      _null_  _null_ ));
 -DATA(insert ( 2641    n 0 int8_accum          numeric_var_samp                numeric_combine                 numeric_serialize               numeric_deserialize                     int8_accum              int8_accum_inv  numeric_var_samp                f f 0   2281    128 2281        128 _null_ _null_ ));
 -DATA(insert ( 2642    n 0 int4_accum          numeric_poly_var_samp   numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int4_accum              int4_accum_inv  numeric_poly_var_samp   f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2643    n 0 int2_accum          numeric_poly_var_samp   numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int2_accum              int2_accum_inv  numeric_poly_var_samp   f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2644    n 0 float4_accum        float8_var_samp                 float8_combine                  -                                               -                                                       -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2645    n 0 float8_accum        float8_var_samp                 float8_combine                  -                                               -                                                       -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2646    n 0 numeric_accum       numeric_var_samp                numeric_combine                 numeric_serialize               numeric_deserialize                     numeric_accum   numeric_accum_inv numeric_var_samp              f f 0   2281    128 2281        128 _null_ _null_ ));
++DATA(insert ( 2641    n 0 int8_accum          numeric_collect numeric_var_samp        numeric_combine numeric_serialize       numeric_deserialize     int8_accum              int8_accum_inv  numeric_var_samp f f 0  7018    7018    128 7018        128 _null_      _null_  _null_ ));
++DATA(insert ( 2642    n 0 int4_accum          numeric_poly_collect    numeric_poly_var_samp numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize  int4_accum              int4_accum_inv  numeric_poly_var_samp f f 0     7019    7019    128 7019        128 _null_      _null_  _null_ ));
++DATA(insert ( 2643    n 0 int2_accum          numeric_poly_collect    numeric_poly_var_samp numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize  int2_accum              int2_accum_inv  numeric_poly_var_samp f f 0     7019    7019    128 7019        128 _null_      _null_  _null_ ));
++DATA(insert ( 2644    n 0 float4_accum        float8_collect  float8_var_samp float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}"     _null_ ));
++DATA(insert ( 2645    n 0 float8_accum        float8_collect  float8_var_samp float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}"     _null_ ));
++DATA(insert ( 2646    n 0 numeric_accum       numeric_collect numeric_var_samp numeric_combine         numeric_serialize       numeric_deserialize    numeric_accum numeric_accum_inv numeric_var_samp f f 0 7018     7018    128 7018        128 _null_      _null_  _null_ ));
  
  /* variance: historical Postgres syntax for var_samp */
- DATA(insert ( 2148    n 0 int8_accum          numeric_collect numeric_var_samp        int8_accum              int8_accum_inv  numeric_var_samp f f 0  7018    7018    128 7018        128 _null_ _null_ _null_ ));
- DATA(insert ( 2149    n 0 int4_accum          numeric_poly_collect    numeric_poly_var_samp int4_accum                int4_accum_inv  numeric_poly_var_samp f f 0     7019    7019    128 7019        128 _null_ _null_ _null_ ));
- DATA(insert ( 2150    n 0 int2_accum          numeric_poly_collect    numeric_poly_var_samp int2_accum                int2_accum_inv  numeric_poly_var_samp f f 0     7019    7019    128 7019        128 _null_ _null_ _null_ ));
- DATA(insert ( 2151    n 0 float4_accum        float8_collect  float8_var_samp -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2152    n 0 float8_accum        float8_collect  float8_var_samp -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2153    n 0 numeric_accum       numeric_collect numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp f f 0 7018    7018    128 7018        128 _null_      _null_ _null_ ));
 -DATA(insert ( 2148    n 0 int8_accum          numeric_var_samp                numeric_combine                 numeric_serialize               numeric_deserialize                     int8_accum              int8_accum_inv  numeric_var_samp                f f 0   2281    128 2281        128 _null_ _null_ ));
 -DATA(insert ( 2149    n 0 int4_accum          numeric_poly_var_samp   numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int4_accum              int4_accum_inv  numeric_poly_var_samp   f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2150    n 0 int2_accum          numeric_poly_var_samp   numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int2_accum              int2_accum_inv  numeric_poly_var_samp   f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2151    n 0 float4_accum        float8_var_samp                 float8_combine                  -                                               -                                                       -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2152    n 0 float8_accum        float8_var_samp                 float8_combine                  -                                               -                                                       -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2153    n 0 numeric_accum       numeric_var_samp                numeric_combine                 numeric_serialize               numeric_deserialize                     numeric_accum   numeric_accum_inv numeric_var_samp              f f 0   2281    128 2281        128 _null_ _null_ ));
++DATA(insert ( 2148    n 0 int8_accum          numeric_collect numeric_var_samp        numeric_combine         numeric_serialize       numeric_deserialize     int8_accum              int8_accum_inv  numeric_var_samp f f 0  7018    7018    128 7018        128 _null_ _null_ _null_ ));
++DATA(insert ( 2149    n 0 int4_accum          numeric_poly_collect    numeric_poly_var_samp numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize  int4_accum              int4_accum_inv  numeric_poly_var_samp f f 0     7019    7019    128 7019        128 _null_ _null_ _null_ ));
++DATA(insert ( 2150    n 0 int2_accum          numeric_poly_collect    numeric_poly_var_samp numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize  int2_accum              int2_accum_inv  numeric_poly_var_samp f f 0     7019    7019    128 7019        128 _null_ _null_ _null_ ));
++DATA(insert ( 2151    n 0 float4_accum        float8_collect  float8_var_samp float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2152    n 0 float8_accum        float8_collect  float8_var_samp float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2153    n 0 numeric_accum       numeric_collect numeric_var_samp numeric_combine         numeric_serialize       numeric_deserialize    numeric_accum numeric_accum_inv numeric_var_samp f f 0 7018     7018    128 7018        128 _null_      _null_ _null_ ));
  
  /* stddev_pop */
- DATA(insert ( 2724    n 0 int8_accum          numeric_collect numeric_stddev_pop              int8_accum      int8_accum_inv  numeric_stddev_pop      f f 0   7018    7018    128 7018        128 _null_ _null_ _null_ ));
- DATA(insert ( 2725    n 0 int4_accum          numeric_poly_collect    numeric_poly_stddev_pop int4_accum      int4_accum_inv  numeric_poly_stddev_pop f f 0   7019    7019    128 7019        128 _null_ _null_ _null_ ));
- DATA(insert ( 2726    n 0 int2_accum          numeric_poly_collect    numeric_poly_stddev_pop int2_accum      int2_accum_inv  numeric_poly_stddev_pop f f 0   7019    7019    128 7019        128 _null_ _null_ _null_ ));
- DATA(insert ( 2727    n 0 float4_accum        float8_collect  float8_stddev_pop       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2728    n 0 float8_accum        float8_collect  float8_stddev_pop       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2729    n 0 numeric_accum       numeric_collect numeric_stddev_pop numeric_accum numeric_accum_inv numeric_stddev_pop f f 0 7018        7018    128 7018        128 _null_      _null_ _null_ ));
 -DATA(insert ( 2724    n 0 int8_accum          numeric_stddev_pop              numeric_combine                 numeric_serialize               numeric_deserialize                     int8_accum              int8_accum_inv  numeric_stddev_pop              f f 0   2281    128 2281        128 _null_ _null_ ));
 -DATA(insert ( 2725    n 0 int4_accum          numeric_poly_stddev_pop numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int4_accum              int4_accum_inv  numeric_poly_stddev_pop f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2726    n 0 int2_accum          numeric_poly_stddev_pop numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int2_accum              int2_accum_inv  numeric_poly_stddev_pop f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2727    n 0 float4_accum        float8_stddev_pop               float8_combine                  -                                               -                                                       -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2728    n 0 float8_accum        float8_stddev_pop               float8_combine                  -                                               -                                                       -                               -                               -                                               f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2729    n 0 numeric_accum       numeric_stddev_pop              numeric_combine                 numeric_serialize               numeric_deserialize                     numeric_accum   numeric_accum_inv numeric_stddev_pop    f f 0   2281    128 2281        128 _null_ _null_ ));
++DATA(insert ( 2724    n 0 int8_accum          numeric_collect numeric_stddev_pop      numeric_combine         numeric_serialize       numeric_deserialize             int8_accum      int8_accum_inv  numeric_stddev_pop      f f 0   7018    7018    128 7018        128 _null_ _null_ _null_ ));
++DATA(insert ( 2725    n 0 int4_accum          numeric_poly_collect    numeric_poly_stddev_pop numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int4_accum      int4_accum_inv  numeric_poly_stddev_pop f f 0   7019    7019    128 7019        128 _null_ _null_ _null_ ));
++DATA(insert ( 2726    n 0 int2_accum          numeric_poly_collect    numeric_poly_stddev_pop numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int2_accum      int2_accum_inv  numeric_poly_stddev_pop f f 0   7019    7019    128 7019        128 _null_ _null_ _null_ ));
++DATA(insert ( 2727    n 0 float4_accum        float8_collect  float8_stddev_pop       float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2728    n 0 float8_accum        float8_collect  float8_stddev_pop       float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2729    n 0 numeric_accum       numeric_collect numeric_stddev_pop numeric_combine         numeric_serialize       numeric_deserialize          numeric_accum numeric_accum_inv numeric_stddev_pop f f 0 7018   7018    128 7018        128 _null_      _null_ _null_ ));
  
  /* stddev_samp */
- DATA(insert ( 2712    n 0 int8_accum          numeric_collect numeric_stddev_samp             int8_accum      int8_accum_inv  numeric_stddev_samp f f 0       7018    7018    128 7018        128 _null_ _null_ _null_ ));
- DATA(insert ( 2713    n 0 int4_accum          numeric_poly_collect    numeric_poly_stddev_samp                int4_accum      int4_accum_inv  numeric_poly_stddev_samp f f 0  7019    7019    128 7019        128 _null_ _null_ _null_ ));
- DATA(insert ( 2714    n 0 int2_accum          numeric_poly_collect    numeric_poly_stddev_samp                int2_accum      int2_accum_inv  numeric_poly_stddev_samp f f 0  7019    7019    128 7019        128 _null_ _null_ _null_ ));
- DATA(insert ( 2715    n 0 float4_accum        float8_collect  float8_stddev_samp      -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2716    n 0 float8_accum        float8_collect  float8_stddev_samp      -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2717    n 0 numeric_accum       numeric_collect numeric_stddev_samp numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 7018      7018    128 7018        128 _null_      _null_ _null_ ));
 -DATA(insert ( 2712    n 0 int8_accum          numeric_stddev_samp                     numeric_combine                 numeric_serialize               numeric_deserialize                     int8_accum      int8_accum_inv  numeric_stddev_samp                     f f 0   2281    128 2281        128 _null_ _null_ ));
 -DATA(insert ( 2713    n 0 int4_accum          numeric_poly_stddev_samp        numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int4_accum      int4_accum_inv  numeric_poly_stddev_samp        f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2714    n 0 int2_accum          numeric_poly_stddev_samp        numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int2_accum      int2_accum_inv  numeric_poly_stddev_samp        f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2715    n 0 float4_accum        float8_stddev_samp                      float8_combine                  -                                               -                                                       -                       -                               -                                                       f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2716    n 0 float8_accum        float8_stddev_samp                      float8_combine                  -                                               -                                                       -                       -                               -                                                       f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2717    n 0 numeric_accum       numeric_stddev_samp                     numeric_combine                 numeric_serialize               numeric_deserialize                     numeric_accum numeric_accum_inv numeric_stddev_samp             f f 0   2281    128 2281        128 _null_ _null_ ));
++DATA(insert ( 2712    n 0 int8_accum          numeric_collect numeric_stddev_samp     numeric_combine         numeric_serialize       numeric_deserialize                     int8_accum      int8_accum_inv  numeric_stddev_samp f f 0       7018    7018    128 7018        128 _null_ _null_ _null_ ));
++DATA(insert ( 2713    n 0 int4_accum          numeric_poly_collect    numeric_poly_stddev_samp numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize                       int4_accum      int4_accum_inv  numeric_poly_stddev_samp f f 0  7019    7019    128 7019        128 _null_ _null_ _null_ ));
++DATA(insert ( 2714    n 0 int2_accum          numeric_poly_collect    numeric_poly_stddev_samp        numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize                int2_accum      int2_accum_inv  numeric_poly_stddev_samp f f 0  7019    7019    128 7019        128 _null_ _null_ _null_ ));
++DATA(insert ( 2715    n 0 float4_accum        float8_collect  float8_stddev_samp      float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2716    n 0 float8_accum        float8_collect  float8_stddev_samp      float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2717    n 0 numeric_accum       numeric_collect numeric_stddev_samp numeric_combine         numeric_serialize       numeric_deserialize         numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 7018  7018    128 7018        128 _null_      _null_ _null_ ));
  
  /* stddev: historical Postgres syntax for stddev_samp */
- DATA(insert ( 2154    n 0 int8_accum          numeric_collect numeric_stddev_samp             int8_accum      int8_accum_inv  numeric_stddev_samp f f 0       7018    7018    128 7018        128 _null_ _null_ _null_ ));
- DATA(insert ( 2155    n 0 int4_accum          numeric_poly_collect    numeric_poly_stddev_samp                int4_accum      int4_accum_inv  numeric_poly_stddev_samp f f 0  7019    7019    128 7019        128 _null_ _null_ _null_ ));
- DATA(insert ( 2156    n 0 int2_accum          numeric_poly_collect    numeric_poly_stddev_samp                int2_accum      int2_accum_inv  numeric_poly_stddev_samp f f 0  7019    7019    128 7019        128 _null_ _null_ _null_ ));
- DATA(insert ( 2157    n 0 float4_accum        float8_collect  float8_stddev_samp      -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2158    n 0 float8_accum        float8_collect  float8_stddev_samp      -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
- DATA(insert ( 2159    n 0 numeric_accum       numeric_collect numeric_stddev_samp numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 7018      7018    128 7018        128 _null_      _null_ _null_ ));
 -DATA(insert ( 2154    n 0 int8_accum          numeric_stddev_samp                     numeric_combine                 numeric_serialize               numeric_deserialize                     int8_accum              int8_accum_inv  numeric_stddev_samp                     f f 0   2281    128 2281        128 _null_ _null_ ));
 -DATA(insert ( 2155    n 0 int4_accum          numeric_poly_stddev_samp        numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int4_accum              int4_accum_inv  numeric_poly_stddev_samp        f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2156    n 0 int2_accum          numeric_poly_stddev_samp        numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int2_accum              int2_accum_inv  numeric_poly_stddev_samp        f f 0   2281    48      2281    48      _null_ _null_ ));
 -DATA(insert ( 2157    n 0 float4_accum        float8_stddev_samp                      float8_combine                  -                                               -                                                       -                               -                               -                                                       f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2158    n 0 float8_accum        float8_stddev_samp                      float8_combine                  -                                               -                                                       -                               -                               -                                                       f f 0   1022    0       0               0       "{0,0,0}" _null_ ));
 -DATA(insert ( 2159    n 0 numeric_accum       numeric_stddev_samp                     numeric_combine                 numeric_serialize               numeric_deserialize                     numeric_accum   numeric_accum_inv numeric_stddev_samp           f f 0   2281    128 2281        128 _null_ _null_ ));
++DATA(insert ( 2154    n 0 int8_accum          numeric_collect numeric_stddev_samp             numeric_combine         numeric_serialize       numeric_deserialize             int8_accum      int8_accum_inv  numeric_stddev_samp f f 0       7018    7018    128 7018        128 _null_ _null_ _null_ ));
++DATA(insert ( 2155    n 0 int4_accum          numeric_poly_collect    numeric_poly_stddev_samp        numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize                        int4_accum      int4_accum_inv  numeric_poly_stddev_samp f f 0  7019    7019    128 7019        128 _null_ _null_ _null_ ));
++DATA(insert ( 2156    n 0 int2_accum          numeric_poly_collect    numeric_poly_stddev_samp        numeric_poly_combine    numeric_poly_serialize  numeric_poly_deserialize        int2_accum      int2_accum_inv  numeric_poly_stddev_samp f f 0  7019    7019    128 7019        128 _null_ _null_ _null_ ));
++DATA(insert ( 2157    n 0 float4_accum        float8_collect  float8_stddev_samp      float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2158    n 0 float8_accum        float8_collect  float8_stddev_samp      float8_combine  -       -       -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0}" "{0,0,0}" _null_ ));
++DATA(insert ( 2159    n 0 numeric_accum       numeric_collect numeric_stddev_samp numeric_combine         numeric_serialize       numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 7018  7018    128 7018        128 _null_      _null_ _null_ ));
  
  /* SQL2003 binary regression aggregates */
- DATA(insert ( 2818    n 0 int8inc_float8_float8       int8_sum_to_int8        -                                       -                               -                               -                               f f 0   20              20              0       0               0       "0" _null_      _null_ ));
- DATA(insert ( 2819    n 0 float8_regr_accum   float8_regr_collect     float8_regr_sxx                 -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2820    n 0 float8_regr_accum   float8_regr_collect     float8_regr_syy                 -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2821    n 0 float8_regr_accum   float8_regr_collect     float8_regr_sxy                 -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2822    n 0 float8_regr_accum   float8_regr_collect     float8_regr_avgx                -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2823    n 0 float8_regr_accum   float8_regr_collect     float8_regr_avgy                -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2824    n 0 float8_regr_accum   float8_regr_collect     float8_regr_r2                  -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2825    n 0 float8_regr_accum   float8_regr_collect     float8_regr_slope               -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2826    n 0 float8_regr_accum   float8_regr_collect     float8_regr_intercept   -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2827    n 0 float8_regr_accum   float8_regr_collect     float8_covar_pop                -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2828    n 0 float8_regr_accum   float8_regr_collect     float8_covar_samp               -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
- DATA(insert ( 2829    n 0 float8_regr_accum   float8_regr_collect     float8_corr                             -                               -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2818    n 0 int8inc_float8_float8       -                                       int8pl                          -       -       -                               -                               -                       f f 0   20              0       0               0       "0" _null_ ));
 -DATA(insert ( 2819    n 0 float8_regr_accum   float8_regr_sxx                 float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2820    n 0 float8_regr_accum   float8_regr_syy                 float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2821    n 0 float8_regr_accum   float8_regr_sxy                 float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2822    n 0 float8_regr_accum   float8_regr_avgx                float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2823    n 0 float8_regr_accum   float8_regr_avgy                float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2824    n 0 float8_regr_accum   float8_regr_r2                  float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2825    n 0 float8_regr_accum   float8_regr_slope               float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2826    n 0 float8_regr_accum   float8_regr_intercept   float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2827    n 0 float8_regr_accum   float8_covar_pop                float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2828    n 0 float8_regr_accum   float8_covar_samp               float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
 -DATA(insert ( 2829    n 0 float8_regr_accum   float8_corr                             float8_regr_combine -   -       -                               -                               -                       f f 0   1022    0       0               0       "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2818    n 0 int8inc_float8_float8       int8_sum_to_int8        int8pl  -       -       -                                       -                               -                               -                               f f 0   20              20              0       0               0       "0" _null_      _null_ ));
++DATA(insert ( 2819    n 0 float8_regr_accum   float8_regr_collect     float8_regr_sxx                 float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2820    n 0 float8_regr_accum   float8_regr_collect     float8_regr_syy                 float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2821    n 0 float8_regr_accum   float8_regr_collect     float8_regr_sxy                 float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2822    n 0 float8_regr_accum   float8_regr_collect     float8_regr_avgx                float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2823    n 0 float8_regr_accum   float8_regr_collect     float8_regr_avgy                float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2824    n 0 float8_regr_accum   float8_regr_collect     float8_regr_r2                  float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2825    n 0 float8_regr_accum   float8_regr_collect     float8_regr_slope               float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2826    n 0 float8_regr_accum   float8_regr_collect     float8_regr_intercept   float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2827    n 0 float8_regr_accum   float8_regr_collect     float8_covar_pop                float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2828    n 0 float8_regr_accum   float8_regr_collect     float8_covar_samp               float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
++DATA(insert ( 2829    n 0 float8_regr_accum   float8_regr_collect     float8_corr                             float8_regr_combine -   -   -                           -                               -                               f f 0   1022    1022    0       0               0       "{0,0,0,0,0,0}" "{0,0,0,0,0,0}" _null_ ));
  
  /* boolean-and and boolean-or */
- DATA(insert ( 2517    n 0 booland_statefunc   booland_statefunc       - bool_accum            bool_accum_inv  bool_alltrue    f f 58  16              16 0    2281    16      _null_ _null_ _null_ ));
- DATA(insert ( 2518    n 0 boolor_statefunc    boolor_statefunc        - bool_accum            bool_accum_inv  bool_anytrue    f f 59  16              16 0    2281    16      _null_ _null_ _null_ ));
- DATA(insert ( 2519    n 0 booland_statefunc   booland_statefunc       - bool_accum            bool_accum_inv  bool_alltrue    f f 58  16              16 0    2281    16      _null_ _null_ _null_ ));
 -DATA(insert ( 2517    n 0 booland_statefunc   -       booland_statefunc       -       -       bool_accum      bool_accum_inv  bool_alltrue    f f 58  16      0       2281    16      _null_ _null_ ));
 -DATA(insert ( 2518    n 0 boolor_statefunc    -       boolor_statefunc        -       -       bool_accum      bool_accum_inv  bool_anytrue    f f 59  16      0       2281    16      _null_ _null_ ));
 -DATA(insert ( 2519    n 0 booland_statefunc   -       booland_statefunc       -       -       bool_accum      bool_accum_inv  bool_alltrue    f f 58  16      0       2281    16      _null_ _null_ ));
++DATA(insert ( 2517    n 0 booland_statefunc   booland_statefunc       - booland_statefunc     -       -       bool_accum              bool_accum_inv  bool_alltrue    f f 58  16              16 0    2281    16      _null_ _null_ _null_ ));
++DATA(insert ( 2518    n 0 boolor_statefunc    boolor_statefunc        - boolor_statefunc      -       -       bool_accum              bool_accum_inv  bool_anytrue    f f 59  16              16 0    2281    16      _null_ _null_ _null_ ));
++DATA(insert ( 2519    n 0 booland_statefunc   booland_statefunc       - booland_statefunc     -       -       bool_accum              bool_accum_inv  bool_alltrue    f f 58  16              16 0    2281    16      _null_ _null_ _null_ ));
  
  /* bitwise integer */
- DATA(insert ( 2236    n 0 int2and             int2and         -                                       -                               -                               -                               f f 0   21              21              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2237    n 0 int2or              int2or          -                                       -                               -                               -                               f f 0   21              21              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2238    n 0 int4and             int4and         -                                       -                               -                               -                               f f 0   23              23              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2239    n 0 int4or              int4or          -                                       -                               -                               -                               f f 0   23              23              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2240    n 0 int8and             int8and         -                                       -                               -                               -                               f f 0   20              20              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2241    n 0 int8or              int8or          -                                       -                               -                               -                               f f 0   20              20              0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2242    n 0 bitand              bitand          -                                       -                               -                               -                               f f 0   1560    1560    0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 2243    n 0 bitor               bitor           -                                       -                               -                               -                               f f 0   1560    1560    0       0               0       _null_ _null_ _null_ ));
 -DATA(insert ( 2236    n 0 int2and             -                               int2and -       -       -                               -                               -                               f f 0   21              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2237    n 0 int2or              -                               int2or  -       -       -                               -                               -                               f f 0   21              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2238    n 0 int4and             -                               int4and -       -       -                               -                               -                               f f 0   23              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2239    n 0 int4or              -                               int4or  -       -       -                               -                               -                               f f 0   23              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2240    n 0 int8and             -                               int8and -       -       -                               -                               -                               f f 0   20              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2241    n 0 int8or              -                               int8or  -       -       -                               -                               -                               f f 0   20              0       0               0       _null_ _null_ ));
 -DATA(insert ( 2242    n 0 bitand              -                               bitand  -       -       -                               -                               -                               f f 0   1560    0       0               0       _null_ _null_ ));
 -DATA(insert ( 2243    n 0 bitor               -                               bitor   -       -       -                               -                               -                               f f 0   1560    0       0               0       _null_ _null_ ));
++DATA(insert ( 2236    n 0 int2and             int2and         -                                       int2and -       -       -                               -                               -                               f f 0   21              21              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2237    n 0 int2or              int2or          -                                       int2or  -       -       -                               -                               -                               f f 0   21              21              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2238    n 0 int4and             int4and         -                                       int4and -       -       -                               -                               -                               f f 0   23              23              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2239    n 0 int4or              int4or          -                                       int4or  -       -       -                               -                               -                               f f 0   23              23              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2240    n 0 int8and             int8and         -                                       int8and -       -       -                               -                               -                               f f 0   20              20              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2241    n 0 int8or              int8or          -                                       int8or  -       -       -                               -                               -                               f f 0   20              20              0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2242    n 0 bitand              bitand          -                                       bitand  -       -       -                               -                               -                               f f 0   1560    1560    0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 2243    n 0 bitor               bitor           -                                       bitor   -       -       -                               -                               -                               f f 0   1560    1560    0       0               0       _null_ _null_ _null_ ));
  
  /* xml */
- DATA(insert ( 2901    n 0 xmlconcat2  -       -                                       -                               -                               -                               f f 0   142             0       0       0               0       _null_  _null_ _null_ ));
 -DATA(insert ( 2901    n 0 xmlconcat2  -                               -               -       -       -                               -                               -                               f f 0   142             0       0               0       _null_ _null_ ));
++DATA(insert ( 2901    n 0 xmlconcat2  -       -       -               -               -                                       -                               -                               -                               f f 0   142             0       0       0               0       _null_  _null_ _null_ ));
  
  /* array */
- DATA(insert ( 2335    n 0 array_agg_transfn   -       array_agg_finalfn                       -       -               -               t f 0   2281    0       0       0               0       _null_  _null_  _null_ ));
- DATA(insert ( 4053    n 0 array_agg_array_transfn -   array_agg_array_finalfn -       -               -               t f 0   2281    0       0       0               0       _null_  _null_  _null_ ));
 -DATA(insert ( 2335    n 0 array_agg_transfn           array_agg_finalfn               -       -       -       -               -                               -                               t f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 4053    n 0 array_agg_array_transfn array_agg_array_finalfn -   -       -       -               -                               -                               t f 0   2281    0       0               0       _null_ _null_ ));
++DATA(insert ( 2335    n 0 array_agg_transfn   -       array_agg_finalfn               -               -               -               -       -               -               t f 0   2281    0       0       0               0       _null_  _null_  _null_ ));
++DATA(insert ( 4053    n 0 array_agg_array_transfn -   array_agg_array_finalfn -       -               -               -       -               -               t f 0   2281    0       0       0               0       _null_  _null_  _null_ ));
  
  /* text */
- DATA(insert ( 3538    n 0 string_agg_transfn  -       string_agg_finalfn      -                               -                               -                               f f 0   2281    0       0       0               0       _null_  _null_ _null_ ));
 -DATA(insert ( 3538    n 0 string_agg_transfn  string_agg_finalfn      -       -       -       -                               -                               -                               f f 0   2281    0       0               0       _null_ _null_ ));
++DATA(insert ( 3538    n 0 string_agg_transfn  -       string_agg_finalfn      -                               -                               -               -               -               -                         f f 0   2281    0       0       0               0       _null_  _null_ _null_ ));
  
  /* bytea */
- DATA(insert ( 3545    n 0 bytea_string_agg_transfn    -       bytea_string_agg_finalfn        -                               -                               -               f f 0   2281    0       0       0               0       _null_  _null_ _null_ ));
 -DATA(insert ( 3545    n 0 bytea_string_agg_transfn    bytea_string_agg_finalfn        -       -       -       -                               -                               -               f f 0   2281    0       0               0       _null_ _null_ ));
++DATA(insert ( 3545    n 0 bytea_string_agg_transfn    -       bytea_string_agg_finalfn        -               -               -       -                               -       -               f f 0   2281    0       0       0               0       _null_  _null_ _null_ ));
  
  /* json */
- DATA(insert ( 3175    n 0 json_agg_transfn    json_agg_collectfn json_agg_finalfn                     -                               -                               - f f 0 7028    7028    0       0       0       _null_ _null_ _null_ ));
- DATA(insert ( 3197    n 0 json_object_agg_transfn -   json_object_agg_finalfn -                               -                               -                               f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
 -DATA(insert ( 3175    n 0 json_agg_transfn    json_agg_finalfn                        -       -       -       -                               -                               -                               f f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3197    n 0 json_object_agg_transfn json_object_agg_finalfn -   -       -       -                               -                               -                               f f 0   2281    0       0               0       _null_ _null_ ));
++DATA(insert ( 3175    n 0 json_agg_transfn    json_agg_collectfn json_agg_finalfn                     -       -               -               -                               -               - f f 0 7028    7028    0       0       0       _null_ _null_ _null_ ));
++DATA(insert ( 3197    n 0 json_object_agg_transfn -   json_object_agg_finalfn -                               -                               -               -               -               -                        f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
  
  /* jsonb */
- DATA(insert ( 3267    n 0 jsonb_agg_transfn   -       jsonb_agg_finalfn                               -                       -                               -                       f f 0   2281    0       0               0       0       _null_ _null_ _null_ ));
- DATA(insert ( 3270    n 0 jsonb_object_agg_transfn    - jsonb_object_agg_finalfn      -                       -                               -                       f f 0   2281    0       0               0       0       _null_ _null_ _null_));
 -DATA(insert ( 3267    n 0 jsonb_agg_transfn   jsonb_agg_finalfn                               -       -       -       -                               -                               -                       f f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3270    n 0 jsonb_object_agg_transfn jsonb_object_agg_finalfn   -       -       -       -                               -                               -                       f f 0   2281    0       0               0       _null_ _null_ ));
++DATA(insert ( 3267    n 0 jsonb_agg_transfn   -       jsonb_agg_finalfn                               -       -               -               -                       -                               -                       f f 0   2281    0       0               0       0       _null_ _null_ _null_ ));
++DATA(insert ( 3270    n 0 jsonb_object_agg_transfn    - jsonb_object_agg_finalfn      -       -               -               -                       -                               -                       f f 0   2281    0       0               0       0       _null_ _null_ _null_ ));
  
  /* ordered-set and hypothetical-set aggregates */
- DATA(insert ( 3972    o 1 ordered_set_transition                      -       percentile_disc_final                                   -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3974    o 1 ordered_set_transition                      -       percentile_cont_float8_final                    -               -               -               f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3976    o 1 ordered_set_transition                      -       percentile_cont_interval_final                  -               -               -               f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3978    o 1 ordered_set_transition                      -       percentile_disc_multi_final                             -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3980    o 1 ordered_set_transition                      -       percentile_cont_float8_multi_final              -               -               -               f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3982    o 1 ordered_set_transition                      -       percentile_cont_interval_multi_final    -               -               -               f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3984    o 0 ordered_set_transition                      -       mode_final                                                              -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3986    h 1 ordered_set_transition_multi        -       rank_final                                                              -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3988    h 1 ordered_set_transition_multi        -       percent_rank_final                                              -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3990    h 1 ordered_set_transition_multi        -       cume_dist_final                                                 -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
- DATA(insert ( 3992    h 1 ordered_set_transition_multi        -       dense_rank_final                                                -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
 -DATA(insert ( 3972    o 1 ordered_set_transition                      percentile_disc_final                                   -       -       -       -               -               -               t f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3974    o 1 ordered_set_transition                      percentile_cont_float8_final                    -       -       -       -               -               -               f f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3976    o 1 ordered_set_transition                      percentile_cont_interval_final                  -       -       -       -               -               -               f f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3978    o 1 ordered_set_transition                      percentile_disc_multi_final                             -       -       -       -               -               -               t f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3980    o 1 ordered_set_transition                      percentile_cont_float8_multi_final              -       -       -       -               -               -               f f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3982    o 1 ordered_set_transition                      percentile_cont_interval_multi_final    -       -       -       -               -               -               f f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3984    o 0 ordered_set_transition                      mode_final                                                              -       -       -       -               -               -               t f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3986    h 1 ordered_set_transition_multi        rank_final                                                              -       -       -       -               -               -               t f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3988    h 1 ordered_set_transition_multi        percent_rank_final                                              -       -       -       -               -               -               t f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3990    h 1 ordered_set_transition_multi        cume_dist_final                                                 -       -       -       -               -               -               t f 0   2281    0       0               0       _null_ _null_ ));
 -DATA(insert ( 3992    h 1 ordered_set_transition_multi        dense_rank_final                                                -       -       -       -               -               -               t f 0   2281    0       0               0       _null_ _null_ ));
--
++DATA(insert ( 3972    o 1 ordered_set_transition                      -       percentile_disc_final                   -               -               -                       -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3974    o 1 ordered_set_transition                      -       percentile_cont_float8_final    -               -               -                       -               -               -               f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3976    o 1 ordered_set_transition                      -       percentile_cont_interval_final  -               -               -                       -               -               -               f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3978    o 1 ordered_set_transition                      -       percentile_disc_multi_final             -               -               -                       -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3980    o 1 ordered_set_transition                      -       percentile_cont_float8_multi_final              -               -               -       -               -               -               f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3982    o 1 ordered_set_transition                      -       percentile_cont_interval_multi_final    -               -               -       -               -               -               f f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3984    o 0 ordered_set_transition                      -       mode_final                                                              -               -               -       -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3986    h 1 ordered_set_transition_multi        -       rank_final                                                              -               -               -       -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3988    h 1 ordered_set_transition_multi        -       percent_rank_final                                              -               -               -       -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3990    h 1 ordered_set_transition_multi        -       cume_dist_final                                                 -               -               -       -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
++DATA(insert ( 3992    h 1 ordered_set_transition_multi        -       dense_rank_final                                                -               -               -       -               -               -               t f 0   2281    0       0       0               0       _null_ _null_ _null_ ));
  
  /*
   * prototypes for functions in pg_aggregate.c
@@@ -342,10 -330,10 +350,13 @@@ extern ObjectAddress AggregateCreate(co
                                List *parameterDefaults,
                                Oid variadicArgType,
                                List *aggtransfnName,
 +#ifdef PGXC
 +                              List *aggcollectfnName,
 +#endif
                                List *aggfinalfnName,
+                               List *aggcombinefnName,
+                               List *aggserialfnName,
+                               List *aggdeserialfnName,
                                List *aggmtransfnName,
                                List *aggminvtransfnName,
                                List *aggmfinalfnName,
                                Oid aggmTransType,
                                int32 aggmTransSpace,
                                const char *agginitval,
-                               const char *aggminitval);
 +#ifdef XCP
 +                              const char *agginitcollect,
 +#endif
+                               const char *aggminitval,
+                               char proparallel);
  
  #endif   /* PG_AGGREGATE_H */
Simple merge
index fab145034f88b2fe6c7337f3d60829704fc410fc,c2e9725245c528048b9771d2e875436ccf5c61a7..308af498122ac5f97fcae0b0cbb426ccff374f8c
@@@ -5,8 -5,7 +5,8 @@@
   *      along with the relation's initial contents.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/catalog/pg_namespace.h
index 637566915f307d8449fcfa298602d5a7dc5fa5cd,af19c1a82b6c7e42378dc354fd753fbb15cb7870..bfc0f74480356bd65d4167a61bb05b111e66ba3b
@@@ -4,8 -4,7 +4,8 @@@
   *      definition of the system "procedure" relation (pg_proc)
   *      along with the relation's initial contents.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/catalog/pg_proc.h
@@@ -2499,141 -2438,155 +2439,145 @@@ DATA(insert OID = 1829 ( icregexnejoins
  DESCR("join selectivity of case-insensitive regex non-match");
  
  /* Aggregate-related functions */
- DATA(insert OID = 1830 (  float8_avg     PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_avg _null_ _null_ _null_ ));
+ DATA(insert OID = 1830 (  float8_avg     PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_avg _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2512 (  float8_var_pop   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_pop _null_ _null_ _null_ ));
+ DATA(insert OID = 2512 (  float8_var_pop   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_pop _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 1831 (  float8_var_samp  PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_samp _null_ _null_ _null_ ));
+ DATA(insert OID = 1831 (  float8_var_samp  PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_samp _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2513 (  float8_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_pop _null_ _null_ _null_ ));
+ DATA(insert OID = 2513 (  float8_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_pop _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 1832 (  float8_stddev_samp  PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_samp _null_ _null_ _null_ ));
+ DATA(insert OID = 1832 (  float8_stddev_samp  PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_samp _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 1833 (  numeric_accum    PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum _null_ _null_ _null_ ));
 -DATA(insert OID = 1833 (  numeric_accum    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum _null_ _null_ _null_ ));
++DATA(insert OID = 1833 (  numeric_accum    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 2858 (  numeric_avg_accum    PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_avg_accum _null_ _null_ _null_ ));
 -DATA(insert OID = 3341 (  numeric_combine      PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_combine _null_ _null_ _null_ ));
 -DESCR("aggregate combine function");
 -DATA(insert OID = 2858 (  numeric_avg_accum    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_avg_accum _null_ _null_ _null_ ));
++DATA(insert OID = 2858 (  numeric_avg_accum    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_avg_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3548 (  numeric_accum_inv    PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum_inv _null_ _null_ _null_ ));
 -DATA(insert OID = 3337 (  numeric_avg_combine  PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_combine _null_ _null_ _null_ ));
 -DESCR("aggregate combine function");
 -DATA(insert OID = 2740 (  numeric_avg_serialize    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_serialize _null_ _null_ _null_ ));
 -DESCR("aggregate serial function");
 -DATA(insert OID = 2741 (  numeric_avg_deserialize      PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_deserialize _null_ _null_ _null_ ));
 -DESCR("aggregate deserial function");
 -DATA(insert OID = 3335 (  numeric_serialize    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_serialize _null_ _null_ _null_ ));
 -DESCR("aggregate serial function");
 -DATA(insert OID = 3336 (  numeric_deserialize  PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ numeric_deserialize _null_ _null_ _null_ ));
 -DESCR("aggregate deserial function");
 -DATA(insert OID = 3548 (  numeric_accum_inv    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3548 (  numeric_accum_inv    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum_inv _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 1834 (  int2_accum     PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 21" _null_ _null_ _null_ _null_ _null_ int2_accum _null_ _null_ _null_ ));
 -DATA(insert OID = 1834 (  int2_accum     PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 21" _null_ _null_ _null_ _null_ _null_ int2_accum _null_ _null_ _null_ ));
++DATA(insert OID = 1834 (  int2_accum     PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 21" _null_ _null_ _null_ _null_ _null_ int2_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 1835 (  int4_accum     PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 23" _null_ _null_ _null_ _null_ _null_ int4_accum _null_ _null_ _null_ ));
 -DATA(insert OID = 1835 (  int4_accum     PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 23" _null_ _null_ _null_ _null_ _null_ int4_accum _null_ _null_ _null_ ));
++DATA(insert OID = 1835 (  int4_accum     PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 23" _null_ _null_ _null_ _null_ _null_ int4_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 1836 (  int8_accum     PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 20" _null_ _null_ _null_ _null_ _null_ int8_accum _null_ _null_ _null_ ));
 -DATA(insert OID = 1836 (  int8_accum     PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_accum _null_ _null_ _null_ ));
++DATA(insert OID = 1836 (  int8_accum     PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 20" _null_ _null_ _null_ _null_ _null_ int8_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 2746 (  int8_avg_accum         PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum _null_ _null_ _null_ ));
 -DATA(insert OID = 3338 (  numeric_poly_combine          PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_combine _null_ _null_ _null_ ));
 -DESCR("aggregate combine function");
 -DATA(insert OID = 3339 (  numeric_poly_serialize      PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_serialize _null_ _null_ _null_ ));
 -DESCR("aggregate serial function");
 -DATA(insert OID = 3340 (  numeric_poly_deserialize      PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_deserialize _null_ _null_ _null_ ));
 -DESCR("aggregate deserial function");
 -DATA(insert OID = 2746 (  int8_avg_accum         PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum _null_ _null_ _null_ ));
++DATA(insert OID = 2746 (  int8_avg_accum         PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3567 (  int2_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 21" _null_ _null_ _null_ _null_ _null_ int2_accum_inv _null_ _null_ _null_ ));
 -DATA(insert OID = 3567 (  int2_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 21" _null_ _null_ _null_ _null_ _null_ int2_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3567 (  int2_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 21" _null_ _null_ _null_ _null_ _null_ int2_accum_inv _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3568 (  int4_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 23" _null_ _null_ _null_ _null_ _null_ int4_accum_inv _null_ _null_ _null_ ));
 -DATA(insert OID = 3568 (  int4_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 23" _null_ _null_ _null_ _null_ _null_ int4_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3568 (  int4_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 23" _null_ _null_ _null_ _null_ _null_ int4_accum_inv _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3569 (  int8_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7018 "7018 20" _null_ _null_ _null_ _null_ _null_ int8_accum_inv _null_ _null_ _null_ ));
 -DATA(insert OID = 3569 (  int8_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3569 (  int8_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7018 "7018 20" _null_ _null_ _null_ _null_ _null_ int8_accum_inv _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3387 (  int8_avg_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 7019 "7019 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum_inv _null_ _null_ _null_ ));
 -DATA(insert OID = 3387 (  int8_avg_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum_inv _null_ _null_ _null_ ));
++DATA(insert OID = 3387 (  int8_avg_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 7019 "7019 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum_inv _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3178 (  numeric_sum    PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_sum _null_ _null_ _null_ ));
 -DATA(insert OID = 2785 (  int8_avg_combine      PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_combine _null_ _null_ _null_ ));
 -DESCR("aggregate combine function");
 -DATA(insert OID = 2786 (  int8_avg_serialize  PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ int8_avg_serialize _null_ _null_ _null_ ));
 -DESCR("aggregate serial function");
 -DATA(insert OID = 2787 (  int8_avg_deserialize          PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_deserialize _null_ _null_ _null_ ));
 -DESCR("aggregate deserial function");
 -DATA(insert OID = 3324 (  int4_avg_combine      PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 1016" _null_ _null_ _null_ _null_ _null_ int4_avg_combine _null_ _null_ _null_ ));
 -DESCR("aggregate combine function");
 -DATA(insert OID = 3178 (  numeric_sum    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_sum _null_ _null_ _null_ ));
++DATA(insert OID = 3178 (  numeric_sum    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_sum _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 1837 (  numeric_avg    PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_avg _null_ _null_ _null_ ));
 -DATA(insert OID = 1837 (  numeric_avg    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_avg _null_ _null_ _null_ ));
++DATA(insert OID = 1837 (  numeric_avg    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_avg _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2514 (  numeric_var_pop  PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_var_pop _null_ _null_ _null_ ));
 -DATA(insert OID = 2514 (  numeric_var_pop  PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_var_pop _null_ _null_ _null_ ));
++DATA(insert OID = 2514 (  numeric_var_pop  PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_var_pop _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 1838 (  numeric_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_var_samp _null_ _null_ _null_ ));
 -DATA(insert OID = 1838 (  numeric_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_var_samp _null_ _null_ _null_ ));
++DATA(insert OID = 1838 (  numeric_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_var_samp _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2596 (  numeric_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_  numeric_stddev_pop _null_ _null_ _null_ ));
 -DATA(insert OID = 2596 (  numeric_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_        numeric_stddev_pop _null_ _null_ _null_ ));
++DATA(insert OID = 2596 (  numeric_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_        numeric_stddev_pop _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 1839 (  numeric_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_stddev_samp _null_ _null_ _null_ ));
 -DATA(insert OID = 1839 (  numeric_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_stddev_samp _null_ _null_ _null_ ));
++DATA(insert OID = 1839 (  numeric_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7018" _null_ _null_ _null_ _null_ _null_ numeric_stddev_samp _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 1840 (  int2_sum               PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 20 "20 21" _null_ _null_ _null_ _null_ _null_ int2_sum _null_ _null_ _null_ ));
+ DATA(insert OID = 1840 (  int2_sum               PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 20 "20 21" _null_ _null_ _null_ _null_ _null_ int2_sum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 1841 (  int4_sum               PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int4_sum _null_ _null_ _null_ ));
+ DATA(insert OID = 1841 (  int4_sum               PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int4_sum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 1842 (  int8_sum               PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 1700 "1700 20" _null_ _null_ _null_ _null_ _null_ int8_sum _null_ _null_ _null_ ));
+ DATA(insert OID = 1842 (  int8_sum               PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 1700 "1700 20" _null_ _null_ _null_ _null_ _null_ int8_sum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3388 (  numeric_poly_sum       PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_sum _null_ _null_ _null_ ));
 -DATA(insert OID = 3388 (  numeric_poly_sum       PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_sum _null_ _null_ _null_ ));
++DATA(insert OID = 3388 (  numeric_poly_sum       PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_sum _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3389 (  numeric_poly_avg       PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_avg _null_ _null_ _null_ ));
 -DATA(insert OID = 3389 (  numeric_poly_avg       PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_avg _null_ _null_ _null_ ));
++DATA(insert OID = 3389 (  numeric_poly_avg       PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_avg _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3390 (  numeric_poly_var_pop        PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
 -DATA(insert OID = 3390 (  numeric_poly_var_pop        PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
++DATA(insert OID = 3390 (  numeric_poly_var_pop        PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3391 (  numeric_poly_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_samp _null_ _null_ _null_ ));
 -DATA(insert OID = 3391 (  numeric_poly_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_samp _null_ _null_ _null_ ));
++DATA(insert OID = 3391 (  numeric_poly_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_samp _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3392 (  numeric_poly_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_pop _null_ _null_ _null_ ));
 -DATA(insert OID = 3392 (  numeric_poly_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_pop _null_ _null_ _null_ ));
++DATA(insert OID = 3392 (  numeric_poly_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_pop _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3393 (  numeric_poly_stddev_samp    PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_samp _null_ _null_ _null_ ));
 -DATA(insert OID = 3393 (  numeric_poly_stddev_samp    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_samp _null_ _null_ _null_ ));
++DATA(insert OID = 3393 (  numeric_poly_stddev_samp    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_samp _null_ _null_ _null_ ));
  DESCR("aggregate final function");
  
- DATA(insert OID = 1843 (  interval_accum   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum _null_ _null_ _null_ ));
+ DATA(insert OID = 1843 (  interval_accum   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3549 (  interval_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum_inv _null_ _null_ _null_ ));
+ DATA(insert OID = 3325 (  interval_combine     PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_combine _null_ _null_ _null_ ));
+ DESCR("aggregate combine function");
+ DATA(insert OID = 3549 (  interval_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum_inv _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 1844 (  interval_avg           PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1186 "1187" _null_ _null_ _null_ _null_ _null_ interval_avg _null_ _null_ _null_ ));
+ DATA(insert OID = 1844 (  interval_avg           PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1186 "1187" _null_ _null_ _null_ _null_ _null_ interval_avg _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 1962 (  int2_avg_accum   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum _null_ _null_ _null_ ));
+ DATA(insert OID = 1962 (  int2_avg_accum   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 1963 (  int4_avg_accum   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum _null_ _null_ _null_ ));
+ DATA(insert OID = 1963 (  int4_avg_accum   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3570 (  int2_avg_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum_inv _null_ _null_ _null_ ));
+ DATA(insert OID = 3570 (  int2_avg_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum_inv _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3571 (  int4_avg_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum_inv _null_ _null_ _null_ ));
+ DATA(insert OID = 3571 (  int4_avg_accum_inv   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum_inv _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 1964 (  int8_avg               PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1700 "1016" _null_ _null_ _null_ _null_ _null_ int8_avg _null_ _null_ _null_ ));
+ DATA(insert OID = 1964 (  int8_avg               PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1700 "1016" _null_ _null_ _null_ _null_ _null_ int8_avg _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3572 (  int2int4_sum           PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "1016" _null_ _null_ _null_ _null_ _null_ int2int4_sum _null_ _null_ _null_ ));
+ DATA(insert OID = 3572 (  int2int4_sum           PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "1016" _null_ _null_ _null_ _null_ _null_ int2int4_sum _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2805 (  int8inc_float8_float8               PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 20 "20 701 701" _null_ _null_ _null_ _null_ _null_ int8inc_float8_float8 _null_ _null_ _null_ ));
+ DATA(insert OID = 2805 (  int8inc_float8_float8               PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 20 "20 701 701" _null_ _null_ _null_ _null_ _null_ int8inc_float8_float8 _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 2806 (  float8_regr_accum                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 1022 "1022 701 701" _null_ _null_ _null_ _null_ _null_ float8_regr_accum _null_ _null_ _null_ ));
+ DATA(insert OID = 2806 (  float8_regr_accum                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 1022 "1022 701 701" _null_ _null_ _null_ _null_ _null_ float8_regr_accum _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 2807 (  float8_regr_sxx                     PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxx _null_ _null_ _null_ ));
+ DATA(insert OID = 3342 (  float8_regr_combine         PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_regr_combine _null_ _null_ _null_ ));
+ DESCR("aggregate combine function");
+ DATA(insert OID = 2807 (  float8_regr_sxx                     PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxx _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2808 (  float8_regr_syy                     PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_syy _null_ _null_ _null_ ));
+ DATA(insert OID = 2808 (  float8_regr_syy                     PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_syy _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2809 (  float8_regr_sxy                     PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxy _null_ _null_ _null_ ));
+ DATA(insert OID = 2809 (  float8_regr_sxy                     PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxy _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2810 (  float8_regr_avgx                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgx _null_ _null_ _null_ ));
+ DATA(insert OID = 2810 (  float8_regr_avgx                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgx _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2811 (  float8_regr_avgy                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgy _null_ _null_ _null_ ));
+ DATA(insert OID = 2811 (  float8_regr_avgy                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgy _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2812 (  float8_regr_r2                      PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_r2 _null_ _null_ _null_ ));
+ DATA(insert OID = 2812 (  float8_regr_r2                      PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_r2 _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2813 (  float8_regr_slope                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_slope _null_ _null_ _null_ ));
+ DATA(insert OID = 2813 (  float8_regr_slope                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_slope _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2814 (  float8_regr_intercept               PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_intercept _null_ _null_ _null_ ));
+ DATA(insert OID = 2814 (  float8_regr_intercept               PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_intercept _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2815 (  float8_covar_pop                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_pop _null_ _null_ _null_ ));
+ DATA(insert OID = 2815 (  float8_covar_pop                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_pop _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2816 (  float8_covar_samp                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_samp _null_ _null_ _null_ ));
+ DATA(insert OID = 2816 (  float8_covar_samp                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_samp _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 2817 (  float8_corr                         PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_corr _null_ _null_ _null_ ));
+ DATA(insert OID = 2817 (  float8_corr                         PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_corr _null_ _null_ _null_ ));
  DESCR("aggregate final function");
  
- DATA(insert OID = 3535 (  string_agg_transfn          PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 2281 "2281 25 25" _null_ _null_ _null_ _null_ _null_ string_agg_transfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3535 (  string_agg_transfn          PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 2281 "2281 25 25" _null_ _null_ _null_ _null_ _null_ string_agg_transfn _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 7000 (  float8_collect                      PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_collect _null_ _null_ _null_ ));
 +#ifdef PGXC
- DATA(insert OID = 7002 (  numeric_collect                     PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 7018 "7018 7018" _null_ _null_ _null_ _null_ _null_ numeric_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7000 (  float8_collect                      PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_collect _null_ _null_ _null_ ));
 +DESCR("aggregate collection function");
- DATA(insert OID = 7013 (  numeric_poly_collect                        PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 7019 "7019 7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7002 (  numeric_collect                     PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 7018 "7018 7018" _null_ _null_ _null_ _null_ _null_ numeric_collect _null_ _null_ _null_ ));
 +DESCR("aggregate collection function");
- DATA(insert OID = 7003 (  interval_collect                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7013 (  numeric_poly_collect                        PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 7019 "7019 7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_collect _null_ _null_ _null_ ));
 +DESCR("aggregate poly_collection function");
- DATA(insert OID = 7004 (  int8_avg_collect                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1016 "1016 1016" _null_ _null_ _null_ _null_ _null_ int8_avg_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7003 (  interval_collect                    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_collect _null_ _null_ _null_ ));
 +DESCR("aggregate transition function");
- DATA(insert OID = 7005 (  int8_sum_to_int8                    PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8_sum_to_int8 _null_ _null_ _null_ ));
++DATA(insert OID = 7004 (  int8_avg_collect                    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 1016" _null_ _null_ _null_ _null_ _null_ int8_avg_collect _null_ _null_ _null_ ));
 +DESCR("AVG(int) collection function");
- DATA(insert OID = 7006 (  float8_regr_collect         PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_regr_collect _null_ _null_ _null_ ));
++DATA(insert OID = 7005 (  int8_sum_to_int8                    PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8_sum_to_int8 _null_ _null_ _null_ ));
 +DESCR("SUM(int) collection function");
- DATA(insert OID = 3536 (  string_agg_finalfn          PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 25 "2281" _null_ _null_ _null_ _null_ _null_ string_agg_finalfn _null_ _null_ _null_ ));
++DATA(insert OID = 7006 (  float8_regr_collect         PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_regr_collect _null_ _null_ _null_ ));
 +DESCR("REGR_...(double, double) collection function");
 +#endif
+ DATA(insert OID = 3536 (  string_agg_finalfn          PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 25 "2281" _null_ _null_ _null_ _null_ _null_ string_agg_finalfn _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3538 (  string_agg                          PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3538 (  string_agg                          PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
  DESCR("concatenate aggregate input into a string");
- DATA(insert OID = 3543 (  bytea_string_agg_transfn    PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 2281 "2281 17 17" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_transfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3543 (  bytea_string_agg_transfn    PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 2281 "2281 17 17" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_transfn _null_ _null_ _null_ ));
  DESCR("aggregate transition function");
- DATA(insert OID = 3544 (  bytea_string_agg_finalfn    PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_finalfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3544 (  bytea_string_agg_finalfn    PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_finalfn _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3545 (  string_agg                          PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0 17 "17 17" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3545 (  string_agg                          PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0 17 "17 17" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
  DESCR("concatenate aggregate input into a bytea");
  
  /* To ASCII conversion */
@@@ -4387,82 -4347,80 +4338,82 @@@ DATA(insert OID = 3053 (  xml_is_well_f
  DESCR("determine if a string is well formed XML content");
  
  /* json */
- DATA(insert OID = 321 (  json_in                 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "2275" _null_ _null_ _null_ _null_ _null_ json_in _null_ _null_ _null_ ));
+ DATA(insert OID = 321 (  json_in                 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "2275" _null_ _null_ _null_ _null_ _null_ json_in _null_ _null_ _null_ ));
  DESCR("I/O");
- DATA(insert OID = 322 (  json_out                PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "114" _null_ _null_ _null_ _null_ _null_ json_out _null_ _null_ _null_ ));
+ DATA(insert OID = 322 (  json_out                PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "114" _null_ _null_ _null_ _null_ _null_ json_out _null_ _null_ _null_ ));
  DESCR("I/O");
- DATA(insert OID = 323 (  json_recv               PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_       json_recv _null_ _null_ _null_ ));
+ DATA(insert OID = 323 (  json_recv               PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_recv _null_ _null_ _null_ ));
  DESCR("I/O");
- DATA(insert OID = 324 (  json_send               PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "114" _null_ _null_ _null_ _null_ _null_ json_send _null_ _null_ _null_ ));
+ DATA(insert OID = 324 (  json_send               PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "114" _null_ _null_ _null_ _null_ _null_ json_send _null_ _null_ _null_ ));
  DESCR("I/O");
- DATA(insert OID = 3153 (  array_to_json    PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2277" _null_ _null_ _null_ _null_ _null_ array_to_json _null_ _null_ _null_ ));
+ DATA(insert OID = 3153 (  array_to_json    PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2277" _null_ _null_ _null_ _null_ _null_ array_to_json _null_ _null_ _null_ ));
  DESCR("map array to json");
- DATA(insert OID = 3154 (  array_to_json    PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "2277 16" _null_ _null_ _null_ _null_ _null_ array_to_json_pretty _null_ _null_ _null_ ));
+ DATA(insert OID = 3154 (  array_to_json    PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "2277 16" _null_ _null_ _null_ _null_ _null_ array_to_json_pretty _null_ _null_ _null_ ));
  DESCR("map array to json with optional pretty printing");
- DATA(insert OID = 3155 (  row_to_json    PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2249" _null_ _null_ _null_ _null_ _null_ row_to_json _null_ _null_ _null_ ));
+ DATA(insert OID = 3155 (  row_to_json    PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2249" _null_ _null_ _null_ _null_ _null_ row_to_json _null_ _null_ _null_ ));
  DESCR("map row to json");
- DATA(insert OID = 3156 (  row_to_json    PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "2249 16" _null_ _null_ _null_ _null_ _null_ row_to_json_pretty _null_ _null_ _null_ ));
+ DATA(insert OID = 3156 (  row_to_json    PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "2249 16" _null_ _null_ _null_ _null_ _null_ row_to_json_pretty _null_ _null_ _null_ ));
  DESCR("map row to json with optional pretty printing");
- DATA(insert OID = 3173 (  json_agg_transfn     PGNSP PGUID 12 1 0 0 0 f f f f f f s 2 0 7028 "7028 2283" _null_ _null_ _null_ _null_ _null_ json_agg_transfn _null_ _null_ _null_ ));
 -DATA(insert OID = 3173 (  json_agg_transfn     PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ _null_ json_agg_transfn _null_ _null_ _null_ ));
++DATA(insert OID = 3173 (  json_agg_transfn     PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 7028 "7028 2283" _null_ _null_ _null_ _null_ _null_ json_agg_transfn _null_ _null_ _null_ ));
  DESCR("json aggregate transition function");
- DATA(insert OID = 7029 (  json_agg_collectfn   PGNSP PGUID 12 1 0 0 0 f f f f f f s 2 0 7028 "7028 7028" _null_ _null_ _null_ _null_ _null_ json_agg_collectfn _null_ _null_ _null_ ));
 -DATA(insert OID = 3174 (  json_agg_finalfn     PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_agg_finalfn _null_ _null_ _null_ ));
++DATA(insert OID = 7029 (  json_agg_collectfn   PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 7028 "7028 7028" _null_ _null_ _null_ _null_ _null_ json_agg_collectfn _null_ _null_ _null_ ));
 +DESCR("json aggregate collection function");
- DATA(insert OID = 3174 (  json_agg_finalfn     PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 114 "7028" _null_ _null_ _null_ _null_ _null_ json_agg_finalfn _null_ _null_ _null_ ));
++DATA(insert OID = 3174 (  json_agg_finalfn     PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 114 "7028" _null_ _null_ _null_ _null_ _null_ json_agg_finalfn _null_ _null_ _null_ ));
  DESCR("json aggregate final function");
- DATA(insert OID = 3175 (  json_agg               PGNSP PGUID 12 1 0 0 0 t f f f f f s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3175 (  json_agg               PGNSP PGUID 12 1 0 0 0 t f f f f f s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
  DESCR("aggregate input into json");
- DATA(insert OID = 3180 (  json_object_agg_transfn      PGNSP PGUID 12 1 0 0 0 f f f f f f s 3 0 2281 "2281 2276 2276" _null_ _null_ _null_ _null_ _null_ json_object_agg_transfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3180 (  json_object_agg_transfn      PGNSP PGUID 12 1 0 0 0 f f f f f f s 3 0 2281 "2281 2276 2276" _null_ _null_ _null_ _null_ _null_ json_object_agg_transfn _null_ _null_ _null_ ));
  DESCR("json object aggregate transition function");
- DATA(insert OID = 3196 (  json_object_agg_finalfn      PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_object_agg_finalfn _null_ _null_ _null_ ));
+ DATA(insert OID = 3196 (  json_object_agg_finalfn      PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_object_agg_finalfn _null_ _null_ _null_ ));
  DESCR("json object aggregate final function");
- DATA(insert OID = 3197 (  json_object_agg                PGNSP PGUID 12 1 0 0 0 t f f f f f s 2 0 114 "2276 2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3197 (  json_object_agg                PGNSP PGUID 12 1 0 0 0 t f f f f f s 2 0 114 "2276 2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
  DESCR("aggregate input into a json object");
- DATA(insert OID = 3198 (  json_build_array       PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_array _null_ _null_ _null_ ));
+ DATA(insert OID = 3198 (  json_build_array       PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_array _null_ _null_ _null_ ));
  DESCR("build a json array from any inputs");
- DATA(insert OID = 3199 (  json_build_array       PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 114  "" _null_ _null_ _null_ _null_ _null_ json_build_array_noargs _null_ _null_ _null_ ));
+ DATA(insert OID = 3199 (  json_build_array       PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 114  "" _null_ _null_ _null_ _null_ _null_ json_build_array_noargs _null_ _null_ _null_ ));
  DESCR("build an empty json array");
- DATA(insert OID = 3200 (  json_build_object    PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_object _null_ _null_ _null_ ));
+ DATA(insert OID = 3200 (  json_build_object    PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_object _null_ _null_ _null_ ));
  DESCR("build a json object from pairwise key/value inputs");
- DATA(insert OID = 3201 (  json_build_object    PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 114  "" _null_ _null_ _null_ _null_ _null_ json_build_object_noargs _null_ _null_ _null_ ));
+ DATA(insert OID = 3201 (  json_build_object    PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 114  "" _null_ _null_ _null_ _null_ _null_ json_build_object_noargs _null_ _null_ _null_ ));
  DESCR("build an empty json object");
- DATA(insert OID = 3202 (  json_object  PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "1009" _null_ _null_ _null_ _null_ _null_ json_object _null_ _null_ _null_ ));
+ DATA(insert OID = 3202 (  json_object  PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "1009" _null_ _null_ _null_ _null_ _null_ json_object _null_ _null_ _null_ ));
  DESCR("map text array of key value pairs to json object");
- DATA(insert OID = 3203 (  json_object  PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "1009 1009" _null_ _null_ _null_ _null_ _null_ json_object_two_arg _null_ _null_ _null_ ));
+ DATA(insert OID = 3203 (  json_object  PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "1009 1009" _null_ _null_ _null_ _null_ _null_ json_object_two_arg _null_ _null_ _null_ ));
  DESCR("map text arrays of keys and values to json object");
- DATA(insert OID = 3176 (  to_json        PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ to_json _null_ _null_ _null_ ));
+ DATA(insert OID = 3176 (  to_json        PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ to_json _null_ _null_ _null_ ));
  DESCR("map input to json");
- DATA(insert OID = 3261 (  json_strip_nulls       PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "114" _null_ _null_ _null_ _null_ _null_ json_strip_nulls _null_ _null_ _null_ ));
+ DATA(insert OID = 3261 (  json_strip_nulls       PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 114 "114" _null_ _null_ _null_ _null_ _null_ json_strip_nulls _null_ _null_ _null_ ));
  DESCR("remove object fields with null values from json");
  
- DATA(insert OID = 3947 (  json_object_field                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field _null_ _null_ _null_ ));
- DATA(insert OID = 3948 (  json_object_field_text      PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25  "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field_text _null_ _null_ _null_ ));
- DATA(insert OID = 3949 (  json_array_element          PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element _null_ _null_ _null_ ));
- DATA(insert OID = 3950 (  json_array_element_text     PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25  "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element_text _null_ _null_ _null_ ));
- DATA(insert OID = 3951 (  json_extract_path                   PGNSP PGUID 12 1 0 25 0 f f f f t f i 2 0 114 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path _null_ _null_ _null_ ));
+ DATA(insert OID = 3947 (  json_object_field                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field _null_ _null_ _null_ ));
+ DATA(insert OID = 3948 (  json_object_field_text      PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25  "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3949 (  json_array_element          PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element _null_ _null_ _null_ ));
+ DATA(insert OID = 3950 (  json_array_element_text     PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25  "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3951 (  json_extract_path                   PGNSP PGUID 12 1 0 25 0 f f f f t f i 2 0 114 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path _null_ _null_ _null_ ));
  DESCR("get value from json with path elements");
- DATA(insert OID = 3953 (  json_extract_path_text      PGNSP PGUID 12 1 0 25 0 f f f f t f i 2 0 25 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3953 (  json_extract_path_text      PGNSP PGUID 12 1 0 25 0 f f f f t f i 2 0 25 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path_text _null_ _null_ _null_ ));
  DESCR("get value from json as text with path elements");
- DATA(insert OID = 3955 (  json_array_elements         PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 114 "114" "{114,114}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements _null_ _null_ _null_ ));
+ DATA(insert OID = 3955 (  json_array_elements         PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 114 "114" "{114,114}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements _null_ _null_ _null_ ));
  DESCR("key value pairs of a json object");
- DATA(insert OID = 3969 (  json_array_elements_text    PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 25 "114" "{114,25}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3969 (  json_array_elements_text    PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 25 "114" "{114,25}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements_text _null_ _null_ _null_ ));
  DESCR("elements of json array");
- DATA(insert OID = 3956 (  json_array_length                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "114" _null_ _null_ _null_ _null_ _null_ json_array_length _null_ _null_ _null_ ));
+ DATA(insert OID = 3956 (  json_array_length                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "114" _null_ _null_ _null_ _null_ _null_ json_array_length _null_ _null_ _null_ ));
  DESCR("length of json array");
- DATA(insert OID = 3957 (  json_object_keys                    PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_object_keys _null_ _null_ _null_ ));
+ DATA(insert OID = 3957 (  json_object_keys                    PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_object_keys _null_ _null_ _null_ ));
  DESCR("get json object keys");
- DATA(insert OID = 3958 (  json_each                              PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,114}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each _null_ _null_ _null_ ));
+ DATA(insert OID = 3958 (  json_each                              PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,114}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each _null_ _null_ _null_ ));
  DESCR("key value pairs of a json object");
- DATA(insert OID = 3959 (  json_each_text                 PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,25}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each_text _null_ _null_ _null_ ));
+ DATA(insert OID = 3959 (  json_each_text                 PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,25}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each_text _null_ _null_ _null_ ));
  DESCR("key value pairs of a json object");
- DATA(insert OID = 3960 (  json_populate_record           PGNSP PGUID 12 1 0 0 0 f f f f f f s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_record _null_ _null_ _null_ ));
+ DATA(insert OID = 3960 (  json_populate_record           PGNSP PGUID 12 1 0 0 0 f f f f f f s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_record _null_ _null_ _null_ ));
  DESCR("get record fields from a json object");
- DATA(insert OID = 3961 (  json_populate_recordset  PGNSP PGUID 12 1 100 0 0 f f f f f t s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_recordset _null_ _null_ _null_ ));
+ DATA(insert OID = 3961 (  json_populate_recordset  PGNSP PGUID 12 1 100 0 0 f f f f f t s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_recordset _null_ _null_ _null_ ));
  DESCR("get set of records with fields from a json array of objects");
- DATA(insert OID = 3204 (  json_to_record                 PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_record _null_ _null_ _null_ ));
+ DATA(insert OID = 3204 (  json_to_record                 PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_record _null_ _null_ _null_ ));
  DESCR("get record fields from a json object");
- DATA(insert OID = 3205 (  json_to_recordset              PGNSP PGUID 12 1 100 0 0 f f f f f t s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_recordset _null_ _null_ _null_ ));
+ DATA(insert OID = 3205 (  json_to_recordset              PGNSP PGUID 12 1 100 0 0 f f f f f t s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_recordset _null_ _null_ _null_ ));
  DESCR("get set of records with fields from a json array of objects");
- DATA(insert OID = 3968 (  json_typeof                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_typeof _null_ _null_ _null_ ));
+ DATA(insert OID = 3968 (  json_typeof                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_typeof _null_ _null_ _null_ ));
  DESCR("get the type of a json value");
  
  /* uuid */
@@@ -5270,129 -5227,105 +5220,154 @@@ DATA(insert OID = 3985 ( mode_final                               
  DESCR("aggregate final function");
  
  /* hypothetical-set aggregates (and their support functions) */
- DATA(insert OID = 3986 ( rank                         PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_       aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3986 ( rank                         PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
  DESCR("rank of hypothetical row");
- DATA(insert OID = 3987 ( rank_final                   PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_   hypothetical_rank_final _null_ _null_ _null_ ));
+ DATA(insert OID = 3987 ( rank_final                   PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_rank_final _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3988 ( percent_rank         PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3988 ( percent_rank         PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
  DESCR("fractional rank of hypothetical row");
- DATA(insert OID = 3989 ( percent_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_percent_rank_final _null_ _null_ _null_ ));
+ DATA(insert OID = 3989 ( percent_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_percent_rank_final _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3990 ( cume_dist                    PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3990 ( cume_dist                    PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
  DESCR("cumulative distribution of hypothetical row");
- DATA(insert OID = 3991 ( cume_dist_final      PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_cume_dist_final _null_ _null_ _null_ ));
+ DATA(insert OID = 3991 ( cume_dist_final      PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_cume_dist_final _null_ _null_ _null_ ));
  DESCR("aggregate final function");
- DATA(insert OID = 3992 ( dense_rank                   PGNSP PGUID 12 1 0 2276 0 t f f f f f i 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_       aggregate_dummy _null_ _null_ _null_ ));
+ DATA(insert OID = 3992 ( dense_rank                   PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
  DESCR("rank of hypothetical row without gaps");
- DATA(insert OID = 3993 ( dense_rank_final     PGNSP PGUID 12 1 0 2276 0 f f f f f f i 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_   hypothetical_dense_rank_final _null_ _null_ _null_ ));
+ DATA(insert OID = 3993 ( dense_rank_final     PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_dense_rank_final _null_ _null_ _null_ ));
  DESCR("aggregate final function");
  
 +#ifdef PGXC
 +DATA(insert OID = 7007 ( pgxc_pool_check      PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pgxc_pool_check _null_ _null_ _null_ ));
 +DESCR("check connection information consistency in pooler");
 +DATA(insert OID = 7008 ( pgxc_pool_reload     PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pgxc_pool_reload _null_ _null_ _null_ ));
 +DESCR("reload connection information in pooler and reload server sessions");
 +DATA(insert OID = 7009 ( pgxc_node_str                PGNSP PGUID 12 1 0 0 0 f f f f t f s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ pgxc_node_str _null_ _null_ _null_ ));
 +DESCR("get the name of the node");
 +DATA(insert OID = 7010 (  pgxc_is_committed   PGNSP PGUID 12 1 1 0 0 f f f f t t s 1 0 16 "28" _null_ _null_ _null_ _null_ _null_ pgxc_is_committed _null_ _null_ _null_ ));
 +DESCR("is given GXID committed or aborted?");
 +DATA(insert OID = 7024 (  pgxc_is_inprogress  PGNSP PGUID 12 1 1 0 0 f f f f t t s 1 0 16 "28" _null_ _null_ _null_ _null_ _null_ pgxc_is_inprogress _null_ _null_ _null_ ));
 +DESCR("is given GXID in progress?");
 +DATA(insert OID = 7011 ( pgxc_lock_for_backup PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pgxc_lock_for_backup _null_ _null_ _null_ ));
 +DESCR("lock the cluster for taking backup");
 +DATA(insert OID = 7014 ( numeric_agg_state_in                         PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 7018 "2275" _null_ _null_ _null_ _null_ _null_ numeric_agg_state_in _null_ _null_ _null_ ));
 +DESCR("I/O");
 +DATA(insert OID = 7015 ( numeric_agg_state_out                        PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "7018" _null_ _null_ _null_ _null_ _null_ numeric_agg_state_out _null_ _null_ _null_ ));
 +DESCR("I/O");
 +DATA(insert OID = 7016 (  numeric_agg_state_recv                 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 7018 "2281" _null_ _null_ _null_ _null_ _null_ numeric_agg_state_recv _null_ _null_ _null_ ));
 +DESCR("I/O");
 +DATA(insert OID = 7017 (  numeric_agg_state_send                 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "7018" _null_ _null_ _null_ _null_ _null_ numeric_agg_state_send _null_ _null_ _null_ ));
 +DESCR("I/O");
 +DATA(insert OID = 7020 ( numeric_poly_agg_state_in                            PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 7019 "2275" _null_ _null_ _null_ _null_ _null_ numeric_poly_agg_state_in _null_ _null_ _null_ ));
 +DESCR("I/O");
 +DATA(insert OID = 7021 ( numeric_poly_agg_state_out                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_agg_state_out _null_ _null_ _null_ ));
 +DESCR("I/O");
 +DATA(insert OID = 7022 (  numeric_poly_agg_state_recv            PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 7019 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_agg_state_recv _null_ _null_ _null_ ));
 +DESCR("I/O");
 +DATA(insert OID = 7023 (  numeric_poly_agg_state_send            PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "7019" _null_ _null_ _null_ _null_ _null_ numeric_poly_agg_state_send _null_ _null_ _null_ ));
 +DESCR("I/O");
 +DATA(insert OID = 7030 ( json_agg_state_in                            PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 7028 "2275" _null_ _null_ _null_ _null_ _null_ json_agg_state_in _null_ _null_ _null_ ));
 +DESCR("I/O");
 +DATA(insert OID = 7025 ( json_agg_state_out                   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "7028" _null_ _null_ _null_ _null_ _null_ json_agg_state_out _null_ _null_ _null_ ));
 +DESCR("I/O");
 +#endif
  /* pg_upgrade support */
- DATA(insert OID = 3582 ( binary_upgrade_set_next_pg_type_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_type_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3582 ( binary_upgrade_set_next_pg_type_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_type_oid _null_ _null_ _null_ ));
  DESCR("for use by pg_upgrade");
- DATA(insert OID = 3584 ( binary_upgrade_set_next_array_pg_type_oid PGNSP PGUID        12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_array_pg_type_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3584 ( binary_upgrade_set_next_array_pg_type_oid PGNSP PGUID        12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_array_pg_type_oid _null_ _null_ _null_ ));
  DESCR("for use by pg_upgrade");
- DATA(insert OID = 3585 ( binary_upgrade_set_next_toast_pg_type_oid PGNSP PGUID        12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_type_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3585 ( binary_upgrade_set_next_toast_pg_type_oid PGNSP PGUID        12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_type_oid _null_ _null_ _null_ ));
  DESCR("for use by pg_upgrade");
- DATA(insert OID = 3586 ( binary_upgrade_set_next_heap_pg_class_oid PGNSP PGUID        12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_heap_pg_class_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3586 ( binary_upgrade_set_next_heap_pg_class_oid PGNSP PGUID        12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_heap_pg_class_oid _null_ _null_ _null_ ));
  DESCR("for use by pg_upgrade");
- DATA(insert OID = 3587 ( binary_upgrade_set_next_index_pg_class_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_index_pg_class_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3587 ( binary_upgrade_set_next_index_pg_class_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_index_pg_class_oid _null_ _null_ _null_ ));
  DESCR("for use by pg_upgrade");
- DATA(insert OID = 3588 ( binary_upgrade_set_next_toast_pg_class_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_class_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3588 ( binary_upgrade_set_next_toast_pg_class_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_class_oid _null_ _null_ _null_ ));
  DESCR("for use by pg_upgrade");
- DATA(insert OID = 3589 ( binary_upgrade_set_next_pg_enum_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_enum_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3589 ( binary_upgrade_set_next_pg_enum_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_enum_oid _null_ _null_ _null_ ));
  DESCR("for use by pg_upgrade");
- DATA(insert OID = 3590 ( binary_upgrade_set_next_pg_authid_oid PGNSP PGUID    12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_authid_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 3590 ( binary_upgrade_set_next_pg_authid_oid PGNSP PGUID    12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_authid_oid _null_ _null_ _null_ ));
  DESCR("for use by pg_upgrade");
- DATA(insert OID = 3591 ( binary_upgrade_create_empty_extension PGNSP PGUID    12 1 0 0 0 f f f f f f v 7 0 2278 "25 25 16 25 1028 1009 1009" _null_ _null_ _null_ _null_ _null_ binary_upgrade_create_empty_extension _null_ _null_ _null_ ));
+ DATA(insert OID = 3591 ( binary_upgrade_create_empty_extension PGNSP PGUID    12 1 0 0 0 f f f f f f v r 7 0 2278 "25 25 16 25 1028 1009 1009" _null_ _null_ _null_ _null_ _null_ binary_upgrade_create_empty_extension _null_ _null_ _null_ ));
+ DESCR("for use by pg_upgrade");
+ DATA(insert OID = 4083 ( binary_upgrade_set_record_init_privs PGNSP PGUID     12 1 0 0 0 f f f f t f v r 1 0 2278 "16" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_record_init_privs _null_ _null_ _null_ ));
  DESCR("for use by pg_upgrade");
  
  /* replication/origin.h */
- DATA(insert OID = 6003 ( pg_replication_origin_create PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_create _null_ _null_ _null_ ));
+ DATA(insert OID = 6003 ( pg_replication_origin_create PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_create _null_ _null_ _null_ ));
  DESCR("create a replication origin");
  
- DATA(insert OID = 6004 ( pg_replication_origin_drop PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_drop _null_ _null_ _null_ ));
+ DATA(insert OID = 6004 ( pg_replication_origin_drop PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_drop _null_ _null_ _null_ ));
  DESCR("drop replication origin identified by its name");
  
- DATA(insert OID = 6005 ( pg_replication_origin_oid PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_oid _null_ _null_ _null_ ));
+ DATA(insert OID = 6005 ( pg_replication_origin_oid PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_oid _null_ _null_ _null_ ));
  DESCR("translate the replication origin's name to its id");
  
- DATA(insert OID = 6006 ( pg_replication_origin_session_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_setup _null_ _null_ _null_ ));
+ DATA(insert OID = 6006 ( pg_replication_origin_session_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_setup _null_ _null_ _null_ ));
  DESCR("configure session to maintain replication progress tracking for the passed in origin");
  
- DATA(insert OID = 6007 ( pg_replication_origin_session_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_reset _null_ _null_ _null_ ));
+ DATA(insert OID = 6007 ( pg_replication_origin_session_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_reset _null_ _null_ _null_ ));
  DESCR("teardown configured replication progress tracking");
  
- DATA(insert OID = 6008 ( pg_replication_origin_session_is_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_is_setup _null_ _null_ _null_ ));
+ DATA(insert OID = 6008 ( pg_replication_origin_session_is_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_is_setup _null_ _null_ _null_ ));
  DESCR("is a replication origin configured in this session");
  
- DATA(insert OID = 6009 ( pg_replication_origin_session_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 3220 "16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_progress _null_ _null_ _null_ ));
+ DATA(insert OID = 6009 ( pg_replication_origin_session_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 3220 "16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_progress _null_ _null_ _null_ ));
  DESCR("get the replication progress of the current session");
  
- DATA(insert OID = 6010 ( pg_replication_origin_xact_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2278 "3220 1184" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_xact_setup _null_ _null_ _null_ ));
+ DATA(insert OID = 6010 ( pg_replication_origin_xact_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2278 "3220 1184" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_xact_setup _null_ _null_ _null_ ));
  DESCR("setup the transaction's origin lsn and timestamp");
  
- DATA(insert OID = 6011 ( pg_replication_origin_xact_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2278 "3220 1184" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_xact_reset _null_ _null_ _null_ ));
+ DATA(insert OID = 6011 ( pg_replication_origin_xact_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v r 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_xact_reset _null_ _null_ _null_ ));
  DESCR("reset the transaction's origin lsn and timestamp");
  
- DATA(insert OID = 6012 ( pg_replication_origin_advance PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2278 "25 3220" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_advance _null_ _null_ _null_ ));
+ DATA(insert OID = 6012 ( pg_replication_origin_advance PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2278 "25 3220" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_advance _null_ _null_ _null_ ));
  DESCR("advance replication itentifier to specific location");
  
- DATA(insert OID = 6013 ( pg_replication_origin_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 3220 "25 16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_progress _null_ _null_ _null_ ));
+ DATA(insert OID = 6013 ( pg_replication_origin_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 3220 "25 16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_progress _null_ _null_ _null_ ));
  DESCR("get an individual replication origin's replication progress");
  
- DATA(insert OID = 6014 ( pg_show_replication_origin_status PGNSP PGUID 12 1 100 0 0 f f f f f t v 0 0 2249 "" "{26,25,3220,3220}" "{o,o,o,o}" "{local_id, external_id, remote_lsn, local_lsn}" _null_ _null_ pg_show_replication_origin_status _null_ _null_ _null_ ));
+ DATA(insert OID = 6014 ( pg_show_replication_origin_status PGNSP PGUID 12 1 100 0 0 f f f f f t v 0 0 2249 "" "{26,25,3220,3220}" "{o,o,o,o}" "{local_id, external_id, remote_lsn, local_lsn}" _null_ _null_ pg_show_replication_origin_status _null_ _null_ _null_ ));
  DESCR("get progress for all replication origins");
  
- DATA(insert OID = 6015 ( pg_msgmodule_set PGNSP PGUID 12 1 1 0 0 f f f f t t i 4 0 16 "20 20 20 2275" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_set _null_ _null_ _null_ ));
 +#ifdef USE_MODULE_MSGIDS
- DATA(insert OID = 6016 ( pg_msgmodule_change PGNSP PGUID 12 1 1 0 0 f f f f t t i 4 0 16 "20 20 20 20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_change _null_ _null_ _null_ ));
++DATA(insert OID = 6015 ( pg_msgmodule_set PGNSP PGUID 12 1 1 0 0 f f f f t t i s 4 0 16 "20 20 20 2275" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_set _null_ _null_ _null_ ));
 +DESCR("set debugging level for module/file/msg");
- DATA(insert OID = 6017 ( pg_msgmodule_enable PGNSP PGUID 12 1 1 0 0 f f f f t t i 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_enable _null_ _null_ _null_ ));
++DATA(insert OID = 6016 ( pg_msgmodule_change PGNSP PGUID 12 1 1 0 0 f f f f t t i s 4 0 16 "20 20 20 20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_change _null_ _null_ _null_ ));
 +DESCR("change debugging level for module/file/msg");
- DATA(insert OID = 6018 ( pg_msgmodule_disable PGNSP PGUID 12 1 1 0 0 f f f f t t i 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_disable _null_ _null_ _null_ ));
++DATA(insert OID = 6017 ( pg_msgmodule_enable PGNSP PGUID 12 1 1 0 0 f f f f t t i s 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_enable _null_ _null_ _null_ ));
 +DESCR("pid to honour overriden log levels");
- DATA(insert OID = 6019 ( pg_msgmodule_enable_all PGNSP PGUID 12 1 1 0 0 f f f f t t i 1 0 16 "16" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_enable_all _null_ _null_ _null_ ));
++DATA(insert OID = 6018 ( pg_msgmodule_disable PGNSP PGUID 12 1 1 0 0 f f f f t t i s 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_disable _null_ _null_ _null_ ));
 +DESCR("pid to ignore overriden log levels");
- DATA(insert OID = 6020 ( pg_msgmodule_disable_all PGNSP PGUID 12 1 1 0 0 f f f f t t 0 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_disable_all _null_ _null_ _null_ ));
++DATA(insert OID = 6019 ( pg_msgmodule_enable_all PGNSP PGUID 12 1 1 0 0 f f f f t t i s 1 0 16 "16" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_enable_all _null_ _null_ _null_ ));
 +DESCR("all current/future processes to honour overriden log levels");
++DATA(insert OID = 6020 ( pg_msgmodule_disable_all PGNSP PGUID 12 1 1 0 0 f f f f t t i s 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_msgmodule_disable_all _null_ _null_ _null_ ));
 +DESCR("all processes to ignore overriden log levels");
 +#endif
 +
+ /* rls */
+ DATA(insert OID = 3298 (  row_security_active    PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_        row_security_active _null_ _null_ _null_ ));
+ DESCR("row security for current context active on table by table oid");
+ DATA(insert OID = 3299 (  row_security_active    PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "25" _null_ _null_ _null_ _null_ _null_        row_security_active_name _null_ _null_ _null_ ));
+ DESCR("row security for current context active on table by table name");
+ /* pg_config */
+ DATA(insert OID = 3400 ( pg_config PGNSP PGUID 12 1 23 0 0 f f f f t t i r 0 0 2249 "" "{25,25}" "{o,o}" "{name,setting}" _null_ _null_ pg_config _null_ _null_ _null_ ));
+ DESCR("pg_config binary as a function");
+ /* pg_controldata related functions */
+ DATA(insert OID = 3441 ( pg_control_system PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{23,23,20,1184}" "{o,o,o,o}" "{pg_control_version,catalog_version_no,system_identifier,pg_control_last_modified}" _null_ _null_ pg_control_system _null_ _null_ _null_ ));
+ DESCR("pg_controldata general state information as a function");
+ DATA(insert OID = 3442 ( pg_control_checkpoint PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{3220,3220,3220,25,23,23,16,25,26,28,28,28,26,28,28,26,28,28,1184}" "{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{checkpoint_location,prior_location,redo_location,redo_wal_file,timeline_id,prev_timeline_id,full_page_writes,next_xid,next_oid,next_multixact_id,next_multi_offset,oldest_xid,oldest_xid_dbid,oldest_active_xid,oldest_multi_xid,oldest_multi_dbid,oldest_commit_ts_xid,newest_commit_ts_xid,checkpoint_time}" _null_ _null_ pg_control_checkpoint _null_ _null_ _null_ ));
+ DESCR("pg_controldata checkpoint state information as a function");
+ DATA(insert OID = 3443 ( pg_control_recovery PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{3220,23,3220,3220,16}" "{o,o,o,o,o}" "{min_recovery_end_location,min_recovery_end_timeline,backup_start_location,backup_end_location,end_of_backup_record_required}" _null_ _null_ pg_control_recovery _null_ _null_ _null_ ));
+ DESCR("pg_controldata recovery state information as a function");
+ DATA(insert OID = 3444 ( pg_control_init PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{23,23,23,23,23,23,23,23,23,16,16,16,23}" "{o,o,o,o,o,o,o,o,o,o,o,o,o}" "{max_data_alignment,database_block_size,blocks_per_segment,wal_block_size,bytes_per_wal_segment,max_identifier_length,max_index_columns,max_toast_chunk_size,large_object_chunk_size,bigint_timestamps,float4_pass_by_value,float8_pass_by_value,data_page_checksum_version}" _null_ _null_ pg_control_init _null_ _null_ _null_ ));
+ DESCR("pg_controldata init state information as a function");
  /*
   * Symbolic values for provolatile column: these indicate whether the result
   * of a function is dependent *only* on the values of its explicit arguments,
index 855c98c64db99b5fe68db9bebf6731ef94b52888,162239c7ae6f3a9aed7c27e4d49d181c7cdb6bb1..cb71180aea32e5589ac0c8990a78240745291974
@@@ -5,8 -5,7 +5,8 @@@
   *      along with the relation's initial contents.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/catalog/pg_type.h
Simple merge
Simple merge
Simple merge
Simple merge
index 3993b89d616911ac90932fd5731378146c96b7ed,6af60d893bbd0bb00d83501e77553b9faf163d5d..30e2df6b540a6f4420f87f23b5ecb3e2a7d02a76
@@@ -3,8 -3,7 +3,8 @@@
   * sequence.h
   *      prototypes for sequence.c.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/commands/sequence.h
Simple merge
Simple merge
index 993ce7e3b87bd17e82053154c579f5270f98c3cd,80cd4a86d8e7d023715c54dc39661e5caad4f90f..b87bf2ace97e000aec0b39a5744d26798eadb5f0
@@@ -4,8 -4,7 +4,8 @@@
   *      header file for postgres vacuum cleaner and statistics analyzer
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/commands/vacuum.h
index fd5d81e8d52c21ce665a7f40ed335ce463e8e010,81059515da76336601baefe7a6b1edb28537ca50..4997e1e166ce4a67e26225c6cbf84b0a11827e9d
@@@ -2,8 -2,7 +2,8 @@@
   * variable.h
   *            Routines for handling specialized SET variables.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/commands/variable.h
Simple merge
index 6bbd6686a77aed896c67075e4f5d0802e653cbbd,d5b3fc8dbd0c3586370b5313fb8a47212ae20ff9..ca9edf539fd15170a80b890363b6e7f0e58017d2
@@@ -5,8 -5,7 +5,8 @@@
   *      and related modules.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/executor/execdesc.h
index 7bca10f7316d7bf184ade85872a3bbe698b16fd1,39521ed08e36c1b893934db6de20c53d9ab0664c..7b5cf2f1f769613c3d4673742d45eacc084e147c
@@@ -4,8 -4,7 +4,8 @@@
   *      support for the POSTGRES executor module
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/executor/executor.h
Simple merge
index 6c77eb7d7f50ccb0612de4b101dc5aa935a8a094,5ac0b6a1f61553e43c58f0f98b5121991b6269de..bfcca219e0c88f57ac4c03380e5cb88ea9e631fc
@@@ -4,8 -4,7 +4,8 @@@
   *      tuple table support stuff
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/executor/tuptable.h
Simple merge
index be1cb664dfa85f4b46c2810a6d5a73e05fb3b057,78545daecec2ba030eca4c35636e5ceb757166d4..bb0d7d1dac6634190733f1678fb08a1864102907
@@@ -10,8 -10,7 +10,8 @@@
   *      Over time, this has also become the preferred place for widely known
   *      resource-limitation stuff, such as work_mem and check_stack_depth().
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/miscadmin.h
Simple merge
index f6cc809963d7fca1f5809ac2b8d17e8db0481ae4,e7fd7bd08eef05356dfc740ad69b84e539e9ebca..411d969b3b1908f868c8e72491379a069442e4f7
@@@ -4,8 -4,7 +4,8 @@@
   *      definitions for executor state nodes
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/nodes/execnodes.h
index e515e2e4dfd3fbc86a35c85e8445eae5325e2da3,6b850e4bc4ecbd4aded4db991168900144fea02f..b8bd0d99d2c9580d864c82a6616f15746e9acac3
@@@ -4,10 -4,8 +4,10 @@@
   *      Definitions for tagged nodes.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/nodes/nodes.h
   *
@@@ -566,18 -550,27 +594,33 @@@ extern PGDLLIMPORT Node *newNodeMacroHo
  /*
   * nodes/{outfuncs.c,print.c}
   */
 +#ifdef XCP
 +extern void set_portable_output(bool value);
 +#endif
  extern char *nodeToString(const void *obj);
  
+ struct Bitmapset;                             /* not to include bitmapset.h here */
+ struct StringInfoData;                        /* not to include stringinfo.h here */
+ extern void outNode(struct StringInfoData *str, const void *obj);
+ extern void outToken(struct StringInfoData *str, const char *s);
+ extern void outBitmapset(struct StringInfoData *str,
+                        const struct Bitmapset *bms);
+ extern void outDatum(struct StringInfoData *str, uintptr_t value,
+                int typlen, bool typbyval);
  /*
   * nodes/{readfuncs.c,read.c}
   */
 +#ifdef XCP
 +extern void set_portable_input(bool value);
 +#endif
  extern void *stringToNode(char *str);
+ extern struct Bitmapset *readBitmapset(void);
+ extern uintptr_t readDatum(bool typbyval);
+ extern bool *readBoolCols(int numCols);
+ extern int *readIntCols(int numCols);
+ extern Oid *readOidCols(int numCols);
+ extern int16 *readAttrNumberCols(int numCols);
  
  /*
   * nodes/copyfuncs.c
index 96613c72c81a8968f7ad07314ae2a474e3667c55,a8aa530b74de9625475317eeeedeeee9b0f30ec4..79b310647b709a7cf17ee823a83407793eed2251
@@@ -4,8 -4,7 +4,8 @@@
   *      Support for finding the values associated with Param nodes.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/nodes/params.h
index 8f26b00a001496d5b1e349d5cc8da542c0a37606,1481fff57de9169b72b9704e8e106e0f8aeb3680..751f67285a57b6e4bfac8550d392e3d3f1ef5dce
   * the location.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/nodes/parsenodes.h
   *
Simple merge
index 09b6362d86b611f9a83471653a00fa2fde88c0d7,369179f2912e8919f5bfa605e5eb017ed07000f5..fbdaa2cce66cbcad5a872a723ab91ca222b4df6c
@@@ -4,8 -4,7 +4,8 @@@
   *      definitions for query plan nodes
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/nodes/plannodes.h
@@@ -70,21 -73,6 +74,19 @@@ typedef struct PlannedStm
        List       *invalItems;         /* other dependencies, as PlanInvalItems */
  
        int                     nParamExec;             /* number of PARAM_EXEC Params used */
-       bool            hasRowSecurity; /* row security applied? */
 +#ifdef XCP
 +      int                     nParamRemote;   /* number of params sent from the master node */
 +
 +      struct RemoteParam *remoteparams;/* parameter descriptors */
 +
 +      const char *pname;                      /* the portal name */
 +
 +      /* Parameters to filter out result rows */
 +      char            distributionType;
 +      AttrNumber  distributionKey;
 +      List       *distributionNodes;
 +      List       *distributionRestrict;
 +#endif        
  } PlannedStmt;
  
  /* macro for fetching the Plan associated with a SubPlan node */
@@@ -700,6 -692,6 +706,16 @@@ typedef struct Grou
        Oid                *grpOperators;       /* equality operators to compare with */
  } Group;
  
++#ifdef XCP
++typedef enum AggDistribution
++{
++      AGG_ONENODE,                            /* not distributed aggregation */
++      AGG_SLAVE,                                      /* execute only transient function */
++      AGG_SLAVE,                                      /* execute only transition function */
++      AGG_MASTER                                      /* execute collection function as transition
++                                                               * and final function */
++#endif
++
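A brief illustrative sketch of what the three modes mean in practice, using avg() purely as an example; the collection-function plumbing itself appears later in this diff, in parse_agg.h.

/*
 * Illustrative only: for something like SELECT avg(x) FROM t, the intent of
 * the split is that each datanode runs the ordinary transition function over
 * its local rows (AGG_SLAVE, producing a partial state such as {sum,count}),
 * the partial states are shipped to the coordinator, combined there with the
 * aggregate's collection function, and only then is the final function
 * applied (AGG_MASTER).  AGG_ONENODE keeps the whole aggregate on one node,
 * as in upstream PostgreSQL.
 */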
  /* ---------------
   *            aggregate node
   *
  typedef struct Agg
  {
        Plan            plan;
-       AggStrategy aggstrategy;
+       AggStrategy aggstrategy;        /* basic strategy, see nodes.h */
 +#ifdef XCP
 +      AggDistribution aggdistribution;
 +#endif
+       AggSplit        aggsplit;               /* agg-splitting mode, see nodes.h */
        int                     numCols;                /* number of grouping columns */
        AttrNumber *grpColIdx;          /* their indexes in the target list */
        Oid                *grpOperators;       /* equality operators to compare with */
index b4c44d41b83cfa720bc5cf0915fba0ed798b3571,df2d27d77ca6ca8270dd3a3d85f448fb5e25f022..f22afd38033702917a0d01e268ec3d6e3081f8a0
@@@ -7,10 -7,8 +7,10 @@@
   *      and join trees.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/nodes/primnodes.h
   *
@@@ -264,10 -280,8 +286,12 @@@ typedef struct Aggre
        Oid                     aggtype;                /* type Oid of result of the aggregate */
        Oid                     aggcollid;              /* OID of collation of result */
        Oid                     inputcollid;    /* OID of collation that function should use */
 +#ifdef PGXC
 +      Oid                     aggtrantype;    /* type Oid of transition results */
 +      bool            agghas_collectfn;       /* is collection function available */
 +#endif /* PGXC */
+       Oid                     aggtranstype;   /* type Oid of aggregate's transition value */
+       List       *aggargtypes;        /* type Oids of direct and aggregated args */
        List       *aggdirectargs;      /* direct arguments, if an ordered-set agg */
        List       *args;                       /* aggregated arguments and sort expressions */
        List       *aggorder;           /* ORDER BY (list of SortGroupClause) */
index 9e117a711a09893386eb99af2a8543a157d6557d,2be8908445b03cdcecc8786a5128aac8015ff890..c0bd1a7bc8c9fd967318072384f30fe69e2a2e63
@@@ -4,8 -4,7 +4,8 @@@
   *      Definitions for planner's internal data structures.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/nodes/relation.h
@@@ -283,18 -304,6 +327,15 @@@ typedef struct PlannerInf
  
        /* optional private data for join_search_hook, e.g., GEQO */
        void       *join_search_private;
-       /* for GroupingFunc fixup in setrefs */
-       AttrNumber *grouping_map;
 +#ifdef XCP
 +      /*
 +       * This is NULL for a SELECT query (NULL distribution means "Coordinator"
 +       * everywhere in the planner). For INSERT, UPDATE or DELETE it should match
 +       * the target table distribution.
 +       */
 +      Distribution *distribution; /* Query result distribution */
 +      bool            recursiveOk;
 +#endif
  } PlannerInfo;
  
  
@@@ -1068,14 -1178,18 +1213,26 @@@ typedef struct UniquePat
        List       *uniq_exprs;         /* expressions to be made unique */
  } UniquePath;
  
 +#ifdef XCP
 +typedef struct RemoteSubPath
 +{
 +      Path            path;
 +      Path       *subpath;
 +} RemoteSubPath;
 +#endif
 +
+ /*
+  * GatherPath runs several copies of a plan in parallel and collects the
+  * results.  The parallel leader may also execute the plan, unless the
+  * single_copy flag is set.
+  */
+ typedef struct GatherPath
+ {
+       Path            path;
+       Path       *subpath;            /* path for each worker */
+       bool            single_copy;    /* path must not be executed >1x */
+ } GatherPath;
  /*
   * All join-type paths share these fields.
   */
@@@ -1161,50 -1271,202 +1318,246 @@@ typedef struct HashPat
        int                     num_batches;    /* number of batches expected */
  } HashPath;
  
 +#ifdef PGXC
 +/*
 + * A remotequery path represents the queries to be sent to the datanode/s
 + *
 + * When RemoteQuery plan is created from RemoteQueryPath, we build the query to
 + * be executed at the datanode. For building such a query, it's important to get
 + * the RHS relation and LHS relation of the JOIN clause. So, instead of storing
 + * the outer and inner paths, we find out the RHS and LHS paths and store those
 + * here.
 + */
 +
 +typedef struct RemoteQueryPath
 +{
 +      Path                    path;
 +      ExecNodes               *rqpath_en;             /* List of datanodes to execute the query on */
 +
 +      /*
 +       * If the path represents a JOIN rel, leftpath and rightpath represent the
 +       * RemoteQuery paths for left (outer) and right (inner) side of the JOIN
 +       * resp. jointype and join_restrictlist pertain to such JOINs.
 +       */
 +      struct RemoteQueryPath  *leftpath;
 +      struct RemoteQueryPath  *rightpath;
 +      JoinType                                jointype;
 +      List                                    *join_restrictlist;     /* restrict list corresponding to JOINs,
 +                                                                                               * only considered if rest of
 +                                                                                               * the JOIN information is
 +                                                                                               * available
 +                                                                                               */
 +      bool                                    has_unshippable_qual;   /* TRUE if there is at least
 +                                                                                                       * one qual which can not be
 +                                                                                                       * shipped to the datanodes
 +                                                                                                       */
 +      bool                                    has_temp_rel;                   /* TRUE if one of the base relations
 +                                                                                                       * involved in this path is a temporary
 +                                                                                                       * table.
 +                                                                                                       */
 +      bool                                    has_unshippable_tlist;  /* TRUE if there is at least one
 +                                                                                                       * targetlist entry which is
 +                                                                                                       * not completely shippable.
 +                                                                                                       */
 +} RemoteQueryPath;
 +#endif /* PGXC */
 +
+ /*
+  * ProjectionPath represents a projection (that is, targetlist computation)
+  *
+  * Nominally, this path node represents using a Result plan node to do a
+  * projection step.  However, if the input plan node supports projection,
+  * we can just modify its output targetlist to do the required calculations
+  * directly, and not need a Result.  In some places in the planner we can just
+  * jam the desired PathTarget into the input path node (and adjust its cost
+  * accordingly), so we don't need a ProjectionPath.  But in other places
+  * it's necessary to not modify the input path node, so we need a separate
+  * ProjectionPath node, which is marked dummy to indicate that we intend to
+  * assign the work to the input plan node.  The estimated cost for the
+  * ProjectionPath node will account for whether a Result will be used or not.
+  */
+ typedef struct ProjectionPath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+       bool            dummypp;                /* true if no separate Result is needed */
+ } ProjectionPath;
+ /*
+  * SortPath represents an explicit sort step
+  *
+  * The sort keys are, by definition, the same as path.pathkeys.
+  *
+  * Note: the Sort plan node cannot project, so path.pathtarget must be the
+  * same as the input's pathtarget.
+  */
+ typedef struct SortPath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+ } SortPath;
+ /*
+  * GroupPath represents grouping (of presorted input)
+  *
+  * groupClause represents the columns to be grouped on; the input path
+  * must be at least that well sorted.
+  *
+  * We can also apply a qual to the grouped rows (equivalent of HAVING)
+  */
+ typedef struct GroupPath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+       List       *groupClause;        /* a list of SortGroupClause's */
+       List       *qual;                       /* quals (HAVING quals), if any */
+ } GroupPath;
+ /*
+  * UpperUniquePath represents adjacent-duplicate removal (in presorted input)
+  *
+  * The columns to be compared are the first numkeys columns of the path's
+  * pathkeys.  The input is presumed already sorted that way.
+  */
+ typedef struct UpperUniquePath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+       int                     numkeys;                /* number of pathkey columns to compare */
+ } UpperUniquePath;
+ /*
+  * AggPath represents generic computation of aggregate functions
+  *
+  * This may involve plain grouping (but not grouping sets), using either
+  * sorted or hashed grouping; for the AGG_SORTED case, the input must be
+  * appropriately presorted.
+  */
+ typedef struct AggPath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+       AggStrategy aggstrategy;        /* basic strategy, see nodes.h */
+       AggSplit        aggsplit;               /* agg-splitting mode, see nodes.h */
+       double          numGroups;              /* estimated number of groups in input */
+       List       *groupClause;        /* a list of SortGroupClause's */
+       List       *qual;                       /* quals (HAVING quals), if any */
+ } AggPath;
+ /*
+  * GroupingSetsPath represents a GROUPING SETS aggregation
+  *
+  * Currently we only support this in sorted not hashed form, so the input
+  * must always be appropriately presorted.
+  */
+ typedef struct GroupingSetsPath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+       List       *rollup_groupclauses;        /* list of lists of SortGroupClause's */
+       List       *rollup_lists;       /* parallel list of lists of grouping sets */
+       List       *qual;                       /* quals (HAVING quals), if any */
+ } GroupingSetsPath;
+ /*
+  * MinMaxAggPath represents computation of MIN/MAX aggregates from indexes
+  */
+ typedef struct MinMaxAggPath
+ {
+       Path            path;
+       List       *mmaggregates;       /* list of MinMaxAggInfo */
+       List       *quals;                      /* HAVING quals, if any */
+ } MinMaxAggPath;
+ /*
+  * WindowAggPath represents generic computation of window functions
+  *
+  * Note: winpathkeys is separate from path.pathkeys because the actual sort
+  * order might be an extension of winpathkeys; but createplan.c needs to
+  * know exactly how many pathkeys match the window clause.
+  */
+ typedef struct WindowAggPath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+       WindowClause *winclause;        /* WindowClause we'll be using */
+       List       *winpathkeys;        /* PathKeys for PARTITION keys + ORDER keys */
+ } WindowAggPath;
+ /*
+  * SetOpPath represents a set-operation, that is INTERSECT or EXCEPT
+  */
+ typedef struct SetOpPath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+       SetOpCmd        cmd;                    /* what to do, see nodes.h */
+       SetOpStrategy strategy;         /* how to do it, see nodes.h */
+       List       *distinctList;       /* SortGroupClauses identifying target cols */
+       AttrNumber      flagColIdx;             /* where is the flag column, if any */
+       int                     firstFlag;              /* flag value for first input relation */
+       double          numGroups;              /* estimated number of groups in input */
+ } SetOpPath;
+ /*
+  * RecursiveUnionPath represents a recursive UNION node
+  */
+ typedef struct RecursiveUnionPath
+ {
+       Path            path;
+       Path       *leftpath;           /* paths representing input sources */
+       Path       *rightpath;
+       List       *distinctList;       /* SortGroupClauses identifying target cols */
+       int                     wtParam;                /* ID of Param representing work table */
+       double          numGroups;              /* estimated number of groups in input */
+ } RecursiveUnionPath;
+ /*
+  * LockRowsPath represents acquiring row locks for SELECT FOR UPDATE/SHARE
+  */
+ typedef struct LockRowsPath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+       List       *rowMarks;           /* a list of PlanRowMark's */
+       int                     epqParam;               /* ID of Param for EvalPlanQual re-eval */
+ } LockRowsPath;
+ /*
+  * ModifyTablePath represents performing INSERT/UPDATE/DELETE modifications
+  *
+  * We represent most things that will be in the ModifyTable plan node
+  * literally, except we have child Path(s) not Plan(s).  But analysis of the
+  * OnConflictExpr is deferred to createplan.c, as is collection of FDW data.
+  */
+ typedef struct ModifyTablePath
+ {
+       Path            path;
+       CmdType         operation;              /* INSERT, UPDATE, or DELETE */
+       bool            canSetTag;              /* do we set the command tag/es_processed? */
+       Index           nominalRelation;        /* Parent RT index for use of EXPLAIN */
+       List       *resultRelations;    /* integer list of RT indexes */
+       List       *subpaths;           /* Path(s) producing source data */
+       List       *subroots;           /* per-target-table PlannerInfos */
+       List       *withCheckOptionLists;       /* per-target-table WCO lists */
+       List       *returningLists; /* per-target-table RETURNING tlists */
+       List       *rowMarks;           /* PlanRowMarks (non-locking only) */
+       OnConflictExpr *onconflict; /* ON CONFLICT clause, or NULL */
+       int                     epqParam;               /* ID of Param for EvalPlanQual re-eval */
+ } ModifyTablePath;
+ /*
+  * LimitPath represents applying LIMIT/OFFSET restrictions
+  */
+ typedef struct LimitPath
+ {
+       Path            path;
+       Path       *subpath;            /* path representing input source */
+       Node       *limitOffset;        /* OFFSET parameter, or NULL if none */
+       Node       *limitCount;         /* COUNT parameter, or NULL if none */
+ } LimitPath;
  /*
   * Restriction clause info.
   *
index 255464406f1385a242ac8a1b1b4336b04ea6d174,2a4df2fc166c49268f87107fa5a14216f6e2dfbd..0a70840dd3cfbf8ffb9d95a6da7def3217c18322
@@@ -4,8 -4,7 +4,8 @@@
   *      prototypes for costsize.c and clausesel.c.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/optimizer/cost.h
  #define DEFAULT_CPU_TUPLE_COST        0.01
  #define DEFAULT_CPU_INDEX_TUPLE_COST 0.005
  #define DEFAULT_CPU_OPERATOR_COST  0.0025
 +#ifdef XCP
 +#define DEFAULT_NETWORK_BYTE_COST  0.001
 +#define DEFAULT_REMOTE_QUERY_COST  100.0
 +#endif
+ #define DEFAULT_PARALLEL_TUPLE_COST 0.1
+ #define DEFAULT_PARALLEL_SETUP_COST  1000.0
  
  #define DEFAULT_EFFECTIVE_CACHE_SIZE  524288  /* measured in pages */
  
@@@ -53,12 -50,11 +55,15 @@@ extern PGDLLIMPORT double random_page_c
  extern PGDLLIMPORT double cpu_tuple_cost;
  extern PGDLLIMPORT double cpu_index_tuple_cost;
  extern PGDLLIMPORT double cpu_operator_cost;
 +#ifdef XCP
 +extern PGDLLIMPORT double network_byte_cost;
 +extern PGDLLIMPORT double remote_query_cost;
 +#endif
+ extern PGDLLIMPORT double parallel_tuple_cost;
+ extern PGDLLIMPORT double parallel_setup_cost;
  extern PGDLLIMPORT int effective_cache_size;
  extern Cost disable_cost;
+ extern int    max_parallel_workers_per_gather;
  extern bool enable_seqscan;
  extern bool enable_indexscan;
  extern bool enable_indexonlyscan;
@@@ -99,12 -91,9 +104,12 @@@ extern void cost_functionscan(Path *pat
                                  RelOptInfo *baserel, ParamPathInfo *param_info);
  extern void cost_valuesscan(Path *path, PlannerInfo *root,
                                RelOptInfo *baserel, ParamPathInfo *param_info);
 +#ifdef PGXC
 +extern void cost_remotequery(Path *path, PlannerInfo *root, RelOptInfo *baserel);
 +#endif
  extern void cost_ctescan(Path *path, PlannerInfo *root,
                         RelOptInfo *baserel, ParamPathInfo *param_info);
- extern void cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm);
+ extern void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm);
  extern void cost_sort(Path *path, PlannerInfo *root,
                  List *pathkeys, Cost input_cost, double tuples, int width,
                  Cost comparison_cost, int sort_mem,
index dbc8647538252b3c95481eb1d208f9bbfc47060e,71d9154a5cfdbcfc518a3e099634682cf300bccf..e11b92ea2b6a1472694b43ef8e079e317eb78e28
@@@ -4,8 -4,7 +4,8 @@@
   *      prototypes for pathnode.c, relnode.c.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/optimizer/pathnode.h
@@@ -70,14 -73,12 +74,18 @@@ extern ResultPath *create_result_path(P
  extern MaterialPath *create_material_path(RelOptInfo *rel, Path *subpath);
  extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel,
                                   Path *subpath, SpecialJoinInfo *sjinfo);
- extern Path *create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
-                                                List *pathkeys, Relids required_outer,
+ extern GatherPath *create_gather_path(PlannerInfo *root,
+                                  RelOptInfo *rel, Path *subpath, PathTarget *target,
+                                  Relids required_outer, double *rows);
 +#ifdef XCP
- extern Path *create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
++extern SubqueryScanPath *create_subqueryscan_path(PlannerInfo *root,
++                                               RelOptInfo *rel, Path *subpath, List *pathkeys, Relids required_outer,
 +                                               Distribution *distribution);
 +#else
+ extern SubqueryScanPath *create_subqueryscan_path(PlannerInfo *root,
+                                                RelOptInfo *rel, Path *subpath,
                                                 List *pathkeys, Relids required_outer);
 +#endif
  extern Path *create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
                                                 List *pathkeys, Relids required_outer);
  extern Path *create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
index a40f4d1d705c615d4ae39f8b9a7ee56ffd9bd028,4fbb6cc3e7e41af3e6d25786e3fa4b23f5701ab1..1da50f09d22284d902d4eb4492c941043dc17380
@@@ -4,8 -4,7 +4,8 @@@
   *      prototypes for various files in optimizer/plan
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/optimizer/planmain.h
  
  #include "nodes/plannodes.h"
  #include "nodes/relation.h"
 +#ifdef XCP
 +#include "pgxc/planner.h"
 +#endif
  
+ /* possible values for force_parallel_mode */
+ typedef enum
+ {
+       FORCE_PARALLEL_OFF,
+       FORCE_PARALLEL_ON,
+       FORCE_PARALLEL_REGRESS
+ }     ForceParallelMode;
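For context (based on the 9.6 force_parallel_mode GUC rather than anything added in this hunk), these values map to the GUC's off/on/regress settings.

/*
 * Assumed mapping: FORCE_PARALLEL_OFF leaves planning alone;
 * FORCE_PARALLEL_ON forces a top-level Gather even where the planner would
 * not choose one, to flush out code wrongly marked parallel-safe;
 * FORCE_PARALLEL_REGRESS behaves like ON but also hides the artifacts
 * (e.g. the extra Gather node in EXPLAIN output) so the standard regression
 * tests keep passing unchanged.
 */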
  /* GUC parameters */
  #define DEFAULT_CURSOR_TUPLE_FRACTION 0.1
  extern double cursor_tuple_fraction;
index 772a58402c4b9913d37b8f363b70a2d8e036eec7,d9790d7a970bdf7bacf7299e3c02a3ee0a361e27..a623b9a009805c24c926c3e8b92aa651905cb44a
@@@ -43,11 -46,12 +46,16 @@@ extern bool is_dummy_plan(Plan *plan)
  extern RowMarkType select_rowmark_type(RangeTblEntry *rte,
                                        LockClauseStrength strength);
  
+ extern void mark_partial_aggref(Aggref *agg, AggSplit aggsplit);
+ extern Path *get_cheapest_fractional_path(RelOptInfo *rel,
+                                                        double tuple_fraction);
  extern Expr *expression_planner(Expr *expr);
 +#ifdef PGXC
 +extern void GetHashExecNodes(RelationLocInfo *rel_loc_info, 
 +                                                      ExecNodes **exec_nodes, const Expr *expr);
 +#endif
  
  extern Expr *preprocess_phv_expression(PlannerInfo *root, Expr *expr);
  
index 82c8610be0b8db27f9b3f50e44fd69dd4953925c,5ba322a262c67495b47588582f06f4edb51027d7..6e3c47dafff4eb5c73b2e690ae68baa5e7c19e33
@@@ -4,8 -4,7 +4,8 @@@
   *            parse analysis for optimizable statements
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/parser/analyze.h
Simple merge
index 13ac665f82b36449eb17c7b6c4beeae3cec53feb,17ffef53a70cbf1358fd5fa6803452a45b44a053..ca265b4de243c377a4143b7671cb479e36d4b3a7
@@@ -7,9 -7,8 +7,9 @@@
   * by the PG_KEYWORD macro, which is not defined in this file; it can
   * be defined by the caller for special purposes.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * IDENTIFICATION
   *      src/include/parser/kwlist.h
@@@ -133,9 -125,9 +133,10 @@@ PG_KEYWORD("definer", DEFINER, UNRESERV
  PG_KEYWORD("delete", DELETE_P, UNRESERVED_KEYWORD)
  PG_KEYWORD("delimiter", DELIMITER, UNRESERVED_KEYWORD)
  PG_KEYWORD("delimiters", DELIMITERS, UNRESERVED_KEYWORD)
+ PG_KEYWORD("depends", DEPENDS, UNRESERVED_KEYWORD)
  PG_KEYWORD("desc", DESC, RESERVED_KEYWORD)
  PG_KEYWORD("dictionary", DICTIONARY, UNRESERVED_KEYWORD)
 +PG_KEYWORD("direct", DIRECT, UNRESERVED_KEYWORD)
  PG_KEYWORD("disable", DISABLE_P, UNRESERVED_KEYWORD)
  PG_KEYWORD("discard", DISCARD, UNRESERVED_KEYWORD)
  PG_KEYWORD("distinct", DISTINCT, RESERVED_KEYWORD)
index e90ad669870aea6994ec3e4d6a1946d78b6aff61,28fc354a424c8cdd040f8991c4314f41c2b2b168..20909ad43f87b02ecc3f89eef3d36408a1f32ee8
@@@ -3,8 -3,7 +3,8 @@@
   * parse_agg.h
   *      handle aggregates and window functions in parser
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/parser/parse_agg.h
@@@ -36,28 -35,34 +36,43 @@@ extern Oid resolve_aggregate_transtype(
                                                        Oid *inputTypes,
                                                        int numArguments);
  
- extern void build_aggregate_fnexprs(Oid *agg_input_types,
-                                               int agg_num_inputs,
-                                               int agg_num_direct_inputs,
-                                               int num_finalfn_inputs,
-                                               bool agg_variadic,
-                                               Oid agg_state_type,
+ extern void build_aggregate_transfn_expr(Oid *agg_input_types,
+                                                        int agg_num_inputs,
+                                                        int agg_num_direct_inputs,
+                                                        bool agg_variadic,
+                                                        Oid agg_state_type,
 +#ifdef XCP
 +                                              Oid agg_collect_type,
 +#endif
-                                               Oid agg_result_type,
-                                               Oid agg_input_collation,
-                                               Oid transfn_oid,
+                                                        Oid agg_input_collation,
+                                                        Oid transfn_oid,
 +#ifdef XCP
 +                                              Oid collectfn_oid,
 +#endif
-                                               Oid invtransfn_oid,
-                                               Oid finalfn_oid,
-                                               Expr **transfnexpr,
-                                               Expr **invtransfnexpr,
+                                                        Oid invtransfn_oid,
+                                                        Expr **transfnexpr,
 +#ifdef XCP
 +                                              Expr **collectfnexpr,
 +#endif
-                                               Expr **finalfnexpr);
+                                                        Expr **invtransfnexpr);
+ extern void build_aggregate_combinefn_expr(Oid agg_state_type,
+                                                          Oid agg_input_collation,
+                                                          Oid combinefn_oid,
+                                                          Expr **combinefnexpr);
+ extern void build_aggregate_serialfn_expr(Oid serialfn_oid,
+                                                         Expr **serialfnexpr);
+ extern void build_aggregate_deserialfn_expr(Oid deserialfn_oid,
+                                                               Expr **deserialfnexpr);
+ extern void build_aggregate_finalfn_expr(Oid *agg_input_types,
+                                                        int num_finalfn_inputs,
+                                                        Oid agg_state_type,
+                                                        Oid agg_result_type,
+                                                        Oid agg_input_collation,
+                                                        Oid finalfn_oid,
+                                                        Expr **finalfnexpr);
  
  #endif   /* PARSE_AGG_H */
Simple merge
Simple merge
index 06b24ed10630de97f8440b4a73979cbe58f6c00b,be3b6f70c1a2a7bc46675fee304b7cff6c70e5ed..f4497d6ceab4500f735649c7505b89daff3542ac
@@@ -4,10 -4,8 +4,10 @@@
   *            parse analysis for utility commands
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/parser/parse_utilcmd.h
   *
Simple merge
Simple merge
Simple merge
index c50889b5fc5964f580e3c61a813b0f7a4f87bebd,b6b88fcf0d2b6be087634dc7b95d1b03e298d8a8..bb8cb3a5fb8aaa3cdb67a306ead12facb4a94e86
  #define MEMSET_LOOP_LIMIT 1024
  
  /* Define to the address where bug reports for this package should be sent. */
 -#define PACKAGE_BUGREPORT "[email protected]"
 +#define PACKAGE_BUGREPORT "p[email protected]"
  
  /* Define to the full name of this package. */
 -#define PACKAGE_NAME "PostgreSQL"
 +#define PACKAGE_NAME "Postgres-XL"
  
  /* Define to the full name and version of this package. */
- #define PACKAGE_STRING "Postgres-XL 9.5alpha1"
 -#define PACKAGE_STRING "PostgreSQL 9.6beta4"
++#define PACKAGE_STRING "Postgres-XL 9.6alpha1"
  
  /* Define to the version of this package. */
- #define PACKAGE_VERSION "9.5alpha1"
 -#define PACKAGE_VERSION "9.6beta4"
++#define PACKAGE_VERSION "9.6alpha1"
  
  /* Define to the name of a signed 128-bit integer type. */
  #undef PG_INT128_TYPE
  #define PG_INT64_TYPE long long int
  
  /* PostgreSQL version as a string */
- #define PG_VERSION "9.5alpha1"
+ #define PG_VERSION "9.6beta4"
  
  /* PostgreSQL version as a number */
- #define PG_VERSION_NUM 90500
+ #define PG_VERSION_NUM 90600
  
  /* Define to the one symbol short name of this package. */
 -#define PACKAGE_TARNAME "postgresql"
 +#define PACKAGE_TARNAME "postgres-xl"
 +
 +/* Postgres-XC version as a string */
 +#define PGXC_VERSION "1.1devel"
 +
 +/* Postgres-XC version as a number */
 +#define PGXC_VERSION_NUM 10100
  
  /* Define to the name of the default PostgreSQL service principal in Kerberos.
     (--with-krb-srvnam=NAME) */
index f9380983a51692c1e9df056893d75fce874eda06,dc3320d091fbc9138454ce57a90d3ec1e9bab745..bace5c6bd101a2abdee41caa8932f9726aec0ac4
@@@ -3,8 -3,7 +3,8 @@@
   *
   *    Definitions for the PostgreSQL statistics collector daemon.
   *
-  *    Copyright (c) 2001-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  *    Copyright (c) 2001-2016, PostgreSQL Global Development Group
   *
   *    src/include/pgstat.h
   * ----------
Simple merge
index 142d7813441a46a4ad73e2180a137a317324f163,fb1933f8f288bea512515f674e05dcc28cccbbff..3b93f7b3bbcbc97339fb8bb00e88e844979c6fa7
@@@ -7,9 -7,8 +7,9 @@@
   * Client-side code should include postgres_fe.h instead.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1995, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/postgres.h
   *
index cba0f10a26f2a69faa749ce2feb1104eb76f75e0,b5000b0822ae6371f1862978425863f4a31ab53f..5b1ee8fd218687571b02ebd01bbeb1d0fd35b493
@@@ -4,9 -4,8 +4,9 @@@
   *      header file for integrated autovacuum daemon
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/postmaster/autovacuum.h
   *
Simple merge
index 4e5347a871f635f0bd1910c5891ed481c099417a,dec8a97271506d9724b5d3923ac71dc4b78ece0f..4ec99d8a18c1d295a88110f949620ffb44d17569
@@@ -4,8 -4,7 +4,8 @@@
   *      POSTGRES backend id communication definitions
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/storage/backendid.h
@@@ -25,21 -24,14 +25,30 @@@ typedef int BackendId;                     /* unique curr
  
  extern PGDLLIMPORT BackendId MyBackendId;             /* backend id of this backend */
  
 +#ifdef XCP
 +/*
 + * Two next variables make up distributed session id. Actual distributed
 + * session id is a string, which includes coordinator node name, but
 + * it is better to use Oid to store and compare with distributed session ids
 + * of other backends under the same postmaster.
 + */
 +extern PGDLLIMPORT Oid MyCoordId;
 +extern PGDLLIMPORT char MyCoordName[NAMEDATALEN];
 +
 +extern PGDLLIMPORT int MyCoordPid;
 +extern PGDLLIMPORT LocalTransactionId MyCoordLxid;
 +
 +/* BackendId of the first backend of the distributed session on the node */
 +extern PGDLLIMPORT BackendId MyFirstBackendId;
 +#endif
+ /* backend id of our parallel session leader, or InvalidBackendId if none */
+ extern PGDLLIMPORT BackendId ParallelMasterBackendId;
+ /*
+  * The BackendId to use for our session's temp relations is normally our own,
+  * but parallel workers should use their leader's ID.
+  */
+ #define BackendIdForTempRelations() \
+       (ParallelMasterBackendId == InvalidBackendId ? MyBackendId : ParallelMasterBackendId)
  
  #endif   /* BACKENDID_H */
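A short note on why the BackendIdForTempRelations() macro above matters; the tBBB_FFF file-naming detail comes from upstream's storage layout, not from this diff.

/*
 * Illustrative: temporary relations are stored in files named roughly
 * t<backendId>_<filenode> (e.g. base/16384/t3_16450, a hypothetical path).
 * A parallel worker using its own backend ID would look for files that do
 * not exist; resolving through the leader's ID makes the worker open the
 * same temp files as the session that created them.
 */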
Simple merge
index 2d6fe193e9145bd5453206ace5063bd6d052db1f,3db11e43f0da2a440f9f5bb5521c33fdcd591bc3..e41a3ae0db6ff48bc2c9d56501807a3e6d513d54
@@@ -4,8 -4,7 +4,8 @@@
   *      Lightweight lock manager
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/storage/lwlock.h
index 571124ad261b6a1303ba35ef2f53a25ff2b82c3c,775c66a197136640512ef22be42b2025811cdf14..bc336fbaff7d1f29ad4f7946c4f755a9bee1e6ad
@@@ -4,8 -4,7 +4,8 @@@
   *      per-process shared memory data structures
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/storage/proc.h
index 1872559532eb1ae11cd0f614067e5a83d988a0f4,dd37c0cb07086fc916fb731ca6ecc150b7d290de..ea12e5c795a2fd8e997541da18e0bbe37cb7e84c
@@@ -4,10 -4,8 +4,10 @@@
   *      POSTGRES process array definitions.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/storage/procarray.h
   *
index 89692e2500e075c9d34dd434742cdd1003d78bc6,f67b9821f23d845e9d179edfdc9cf08b5be69f15..105fbaffea476269f7f6c2e8db9c64ced885c00a
@@@ -4,8 -4,7 +4,8 @@@
   *      Routines for interprocess signalling
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/storage/procsignal.h
Simple merge
index 1eaecde37b264c126a2911b5c1f80a1be2493d0d,a8e7877f704f7982fed73690a4185e127de73170..7e384f6ea7af63d334fb7cfbf8621b21e340d3c7
@@@ -4,8 -4,7 +4,8 @@@
   *      storage manager switch public interface declarations.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/storage/smgr.h
index 7cafe2a4948b22fc2536b939ef9a9e17a88a5735,dd80433f74fb932a1a6cd34f9e3a337d474a04e1..746d106a3997a0a2ca1542794f88b820dec5f42a
@@@ -57,8 -57,7 +57,8 @@@
   * calls in portal and cursor manipulations.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/tcop/dest.h
@@@ -94,11 -93,9 +94,12 @@@ typedef enu
        DestTuplestore,                         /* results sent to Tuplestore */
        DestIntoRel,                            /* results sent to relation (SELECT INTO) */
        DestCopyOut,                            /* results sent to COPY TO code */
-       DestSQLFunction,                                /* results sent to SQL-language func mgr */
+       DestSQLFunction,                        /* results sent to SQL-language func mgr */
 +#ifdef XCP
 +      DestProducer,                           /* results sent to a SharedQueue */
 +#endif
-       DestTransientRel                        /* results sent to transient relation */
+       DestTransientRel,                       /* results sent to transient relation */
+       DestTupleQueue                          /* results sent to tuple queue */
  } CommandDest;
  
  /* ----------------
index 467eb7161ed65187ebf142417e67d3b2a0056ee9,e04fc4330d92f4f14493efa86619df0d79bfab2b..ba34a8446adda4c5618be922635161847dd5b8b8
@@@ -4,8 -4,7 +4,8 @@@
   *      prototypes for pquery.c.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/tcop/pquery.h
Simple merge
Simple merge
index 0523a3e02af8a77361faa32e65c2079299f1266b,a91be981b9873bd114c16d1459b7df7be02c663e..a09e03724bd13c51218c9f013cf0afa458156a3c
@@@ -4,8 -4,7 +4,8 @@@
   *      Declarations for operations on built-in types.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/utils/builtins.h
  #define BUILTINS_H
  
  #include "fmgr.h"
 +#include "lib/stringinfo.h"
  #include "nodes/parsenodes.h"
 -
 +#ifdef PGXC
 +#include "lib/stringinfo.h"
 +#endif
+ #include "utils/sortsupport.h"
  /*
   *            Defined in adt/
   */
@@@ -416,20 -434,16 +438,22 @@@ extern Datum dpi(PG_FUNCTION_ARGS)
  extern Datum radians(PG_FUNCTION_ARGS);
  extern Datum drandom(PG_FUNCTION_ARGS);
  extern Datum setseed(PG_FUNCTION_ARGS);
+ extern Datum float8_combine(PG_FUNCTION_ARGS);
  extern Datum float8_accum(PG_FUNCTION_ARGS);
  extern Datum float4_accum(PG_FUNCTION_ARGS);
 +#ifdef PGXC
 +extern Datum float8_collect(PG_FUNCTION_ARGS);
 +#endif
  extern Datum float8_avg(PG_FUNCTION_ARGS);
  extern Datum float8_var_pop(PG_FUNCTION_ARGS);
  extern Datum float8_var_samp(PG_FUNCTION_ARGS);
  extern Datum float8_stddev_pop(PG_FUNCTION_ARGS);
  extern Datum float8_stddev_samp(PG_FUNCTION_ARGS);
  extern Datum float8_regr_accum(PG_FUNCTION_ARGS);
 +#ifdef PGXC
 +extern Datum float8_regr_collect(PG_FUNCTION_ARGS);
 +#endif
+ extern Datum float8_regr_combine(PG_FUNCTION_ARGS);
  extern Datum float8_regr_sxx(PG_FUNCTION_ARGS);
  extern Datum float8_regr_syy(PG_FUNCTION_ARGS);
  extern Datum float8_regr_sxy(PG_FUNCTION_ARGS);
@@@ -1061,17 -1084,16 +1107,23 @@@ extern Datum numeric_accum_inv(PG_FUNCT
  extern Datum int2_accum(PG_FUNCTION_ARGS);
  extern Datum int4_accum(PG_FUNCTION_ARGS);
  extern Datum int8_accum(PG_FUNCTION_ARGS);
 +#ifdef PGXC
 +extern Datum numeric_collect(PG_FUNCTION_ARGS);
 +extern Datum numeric_poly_collect(PG_FUNCTION_ARGS);
 +#endif
+ extern Datum numeric_poly_combine(PG_FUNCTION_ARGS);
+ extern Datum numeric_poly_serialize(PG_FUNCTION_ARGS);
+ extern Datum numeric_poly_deserialize(PG_FUNCTION_ARGS);
  extern Datum int2_accum_inv(PG_FUNCTION_ARGS);
  extern Datum int4_accum_inv(PG_FUNCTION_ARGS);
  extern Datum int8_accum_inv(PG_FUNCTION_ARGS);
  extern Datum int8_avg_accum(PG_FUNCTION_ARGS);
 +#ifdef PGXC
 +extern Datum numeric_avg_collect(PG_FUNCTION_ARGS);
 +#endif
+ extern Datum int8_avg_combine(PG_FUNCTION_ARGS);
+ extern Datum int8_avg_serialize(PG_FUNCTION_ARGS);
+ extern Datum int8_avg_deserialize(PG_FUNCTION_ARGS);
  extern Datum numeric_avg(PG_FUNCTION_ARGS);
  extern Datum numeric_sum(PG_FUNCTION_ARGS);
  extern Datum numeric_var_pop(PG_FUNCTION_ARGS);
@@@ -1087,14 -1109,9 +1139,15 @@@ extern Datum numeric_poly_stddev_samp(P
  extern Datum int2_sum(PG_FUNCTION_ARGS);
  extern Datum int4_sum(PG_FUNCTION_ARGS);
  extern Datum int8_sum(PG_FUNCTION_ARGS);
 +#ifdef PGXC
 +extern Datum int8_sum_to_int8(PG_FUNCTION_ARGS);
 +#endif
  extern Datum int2_avg_accum(PG_FUNCTION_ARGS);
  extern Datum int4_avg_accum(PG_FUNCTION_ARGS);
 +#ifdef PGXC
 +extern Datum int8_avg_collect(PG_FUNCTION_ARGS);
 +#endif
+ extern Datum int4_avg_combine(PG_FUNCTION_ARGS);
  extern Datum int2_avg_accum_inv(PG_FUNCTION_ARGS);
  extern Datum int4_avg_accum_inv(PG_FUNCTION_ARGS);
  extern Datum int8_avg_accum_inv(PG_FUNCTION_ARGS);
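
The *_combine, *_serialize and *_deserialize declarations added above are the support functions behind 9.6-style partial aggregation, while the PGXC-only *_collect functions play the analogous role when partial results are merged across data nodes. A minimal SQL sketch of where the int8 variants come into play; the table name is a placeholder and the plan shape depends entirely on parallel-query settings:

  -- hypothetical illustration: avg/sum over a large bigint column can be split
  -- into a Partial Aggregate (int8_avg_accum + int8_avg_serialize in the
  -- workers) and a Finalize Aggregate (int8_avg_deserialize + int8_avg_combine
  -- in the leader)
  set max_parallel_workers_per_gather = 2;
  explain (costs off)
    select avg(q1), sum(q1) from some_large_int8_table;  -- placeholder name
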
Simple merge
index 7e119afb7220686e504678fe0248bf1821e6f84a,e1de1a5d0653bdb72ecbdb6a92207f3e5e4fa3ea..a831fa27777e080818be5e52d5010fddaf137a74
@@@ -4,8 -4,7 +4,8 @@@
   * External declarations pertaining to backend/utils/misc/guc.c and
   * backend/utils/misc/guc-file.l
   *
-  * Copyright (c) 2000-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Copyright (c) 2000-2016, PostgreSQL Global Development Group
   * Written by Peter Eisentraut <[email protected]>.
   *
   * src/include/utils/guc.h
Simple merge
Simple merge
index bd96e8c7e565ff83c22fb6a8cf05934cca548681,dcb89807e9d2232947a407979a153a9eca7799eb..f72233c33557ea25a04e120f2b96ce87808cf1cb
@@@ -3,8 -3,7 +3,8 @@@
   * lsyscache.h
   *      Convenience routines for common queries in the system catalog cache.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/utils/lsyscache.h
index af73478c4041f6091f2bc900198676b76a337c9b,938c4afc8be0bd81463a35d9c75799b95e0e08e6..41b10ab37260cab994947c8c4fbb85f66cb94bda
@@@ -5,8 -5,7 +5,8 @@@
   *
   * See plancache.c for comments.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/utils/plancache.h
@@@ -110,11 -111,6 +112,9 @@@ typedef struct CachedPlanSourc
        double          generic_cost;   /* cost of generic plan, or -1 if not known */
        double          total_custom_cost;              /* total cost of custom plans so far */
        int                     num_custom_plans;               /* number of plans included in total */
-       bool            hasRowSecurity; /* planned with row security? */
-       bool            row_security_env;               /* row security setting when planned */
 +#ifdef PGXC
 +      char       *stmt_name;          /* if set, a copy of the prepared statement's name */
 +#endif
  } CachedPlanSource;
  
  /*
index bcaf5cc3c8ee781ec9b6fe2b7b059bca37156d82,c1d93a96bb46a19bcff9ce863ff34a745f96ddaf..ba11a5cf5f2fbfa921ea274fabf0f51a9f2e59db
@@@ -36,8 -36,7 +36,8 @@@
   * to look like NO SCROLL cursors.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/utils/portal.h
@@@ -162,10 -161,17 +166,20 @@@ typedef struct PortalDat
         */
        Tuplestorestate *holdStore; /* store for holdable cursors */
        MemoryContext holdContext;      /* memory containing holdStore */
 +#ifdef XCP
 +      MemoryContext tmpContext;       /* temporary memory */
 +#endif
  
+       /*
+        * Snapshot under which tuples in the holdStore were read.  We must keep a
+        * reference to this snapshot if there is any possibility that the tuples
+        * contain TOAST references, because releasing the snapshot could allow
+        * recently-dead rows to be vacuumed away, along with any toast data
+        * belonging to them.  In the case of a held cursor, we avoid needing to
+        * keep such a snapshot by forcibly detoasting the data.
+        */
+       Snapshot        holdSnapshot;   /* registered snapshot, or NULL if none */
        /*
         * atStart, atEnd and portalPos indicate the current cursor position.
         * portalPos is zero before the first row, N after fetching N'th row of
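
The new holdSnapshot comment above describes the WITH HOLD cursor case; a minimal SQL sketch of that scenario (names are illustrative): at COMMIT the cursor's rows are copied into holdStore and forcibly detoasted, so no snapshot has to be retained for their TOAST references.

  begin;
  declare held_cur cursor with hold for
      select repeat('x', 100000) as big_val from generate_series(1, 3);
  commit;                  -- rows are materialized into holdStore, detoasted
  fetch all from held_cur;
  close held_cur;
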
index 2b9db7cc3dde1a0bf5c2c5d56c5c5afda6d2c0e2,ed14442cfe877d5b97a60bcb768ddd8833a75195..f793dd3719ee0bac340833ea2a8d34c1bce66e2c
@@@ -4,10 -4,8 +4,10 @@@
   *      POSTGRES relation descriptor (a/k/a relcache entry) definitions.
   *
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/utils/rel.h
   *
@@@ -192,11 -172,36 +180,39 @@@ typedef struct RelationDat
  
        /* use "struct" here to avoid needing to include pgstat.h: */
        struct PgStat_TableStatus *pgstat_info;         /* statistics collection area */
 +#ifdef PGXC
 +      RelationLocInfo *rd_locator_info;
 +#endif
  } RelationData;
  
+ /*
+  * ForeignKeyCacheInfo
+  *            Information the relcache can cache about foreign key constraints
+  *
+  * This is basically just an image of relevant columns from pg_constraint.
+  * We make it a subclass of Node so that copyObject() can be used on a list
+  * of these, but we also ensure it is a "flat" object without substructure,
+  * so that list_free_deep() is sufficient to free such a list.
+  * The per-FK-column arrays can be fixed-size because we allow at most
+  * INDEX_MAX_KEYS columns in a foreign key constraint.
+  *
+  * Currently, we only cache fields of interest to the planner, but the
+  * set of fields could be expanded in future.
+  */
+ typedef struct ForeignKeyCacheInfo
+ {
+       NodeTag         type;
+       Oid                     conrelid;               /* relation constrained by the foreign key */
+       Oid                     confrelid;              /* relation referenced by the foreign key */
+       int                     nkeys;                  /* number of columns in the foreign key */
+       /* these arrays each have nkeys valid entries: */
+       AttrNumber      conkey[INDEX_MAX_KEYS]; /* cols in referencing table */
+       AttrNumber      confkey[INDEX_MAX_KEYS];                /* cols in referenced table */
+       Oid                     conpfeqop[INDEX_MAX_KEYS];              /* PK = FK operator OIDs */
+ } ForeignKeyCacheInfo;
  /*
   * StdRdOptions
   *            Standard contents of rd_options for heaps and generic indexes.
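
ForeignKeyCacheInfo, added above, is essentially an image of a handful of pg_constraint columns; an illustrative query (not part of the regression suite) over those same fields:

  select conrelid::regclass  as referencing_table,
         confrelid::regclass as referenced_table,
         conkey,             -- columns in the referencing table
         confkey,            -- columns in the referenced table
         conpfeqop           -- PK = FK equality operator OIDs
    from pg_constraint
   where contype = 'f';
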
Simple merge
index d9fc3380b56ffe8ad7fa14c35c6a74962fad4f9a,998e2e593d06ea07eb6b6459332e70d6d7dd3820..7cc6f3894d5f58da6b2495858af3990e317a4f4e
@@@ -3,9 -3,8 +3,9 @@@
   * snapshot.h
   *      POSTGRES snapshot definition
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/utils/snapshot.h
   *
index 84c778936449b874c89a0e85c0e9a21b78437d54,256615b67134583ffe926cd9d046cf966ed6a49f..5a16368f12f7415c04b5868ba1de0a9acc3c900b
@@@ -6,9 -6,8 +6,9 @@@
   * See also lsyscache.h, which provides convenience routines for
   * common cache-lookup operations.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
 + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
   *
   * src/include/utils/syscache.h
   *
index a8db53fd57c0cf4a7604411bf179bdc0388916f4,85cc7ce1fe8cf8dc1253a7488dc67c74c41687d8..3e8f4855d05d4d156a27f4650d3c3830bc540b0b
@@@ -189,9 -187,7 +190,10 @@@ extern Datum interval_mul(PG_FUNCTION_A
  extern Datum mul_d_interval(PG_FUNCTION_ARGS);
  extern Datum interval_div(PG_FUNCTION_ARGS);
  extern Datum interval_accum(PG_FUNCTION_ARGS);
 +#ifdef PGXC
 +extern Datum interval_collect(PG_FUNCTION_ARGS);
 +#endif
+ extern Datum interval_combine(PG_FUNCTION_ARGS);
  extern Datum interval_accum_inv(PG_FUNCTION_ARGS);
  extern Datum interval_avg(PG_FUNCTION_ARGS);
  
index 0a4910ba2391ceaedf653ba8fd02927572b4621f,5cecd6d1b8657fa1d03ca074b70d78ef0c6768b9..ac46f90c4ade6120ecdbf20e7beee12eff13cc2f
@@@ -10,8 -10,7 +10,8 @@@
   * amounts are sorted using temporary files and a standard external sort
   * algorithm.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/utils/tuplesort.h
index d0dd73be812770ffb22efedea9f6474ddde2e5f4,e864a6d8b770e3cb2d272bce9024c85fcb444047..1eecc89bcf55c24134329fa55a9b817607861693
@@@ -21,8 -21,7 +21,8 @@@
   * Also, we have changed the API to return tuples in TupleTableSlots,
   * so that there is a check to prevent attempted access to system columns.
   *
-  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 + * Portions Copyright (c) 2012-2014, TransLattice, Inc.
+  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
   * Portions Copyright (c) 1994, Regents of the University of California
   *
   * src/include/utils/tuplestore.h
Simple merge
index b117816bde225864a496a1644d789ea230bbb841,586ff1f329546094e659c11b73936196fb9db744..428743532b7d707898171a445dc4a93695a1a63b
  #include "utils/rel.h"
  #include "utils/snapmgr.h"
  #include "utils/typcache.h"
 +#ifdef XCP
 +#include "pgxc/pgxc.h"
 +#endif
  
  
- static const char *const raise_skip_msg = "RAISE";
  typedef struct
  {
        int                     nargs;                  /* number of arguments */
index 80bb07041a15eb777eedc1f9b52718e195b83c23,0b41e3acb6c14cc2a2148b916b5cde8d412ea0bf..bf029860eb1d04e40854c3380bef332a588d5286
@@@ -3469,8 -3547,8 +3547,8 @@@ check_sql_expr(const char *stmt, int lo
        syntax_errcontext.previous = error_context_stack;
        error_context_stack = &syntax_errcontext;
  
-       oldCxt = MemoryContextSwitchTo(compile_tmp_cxt);
+       oldCxt = MemoryContextSwitchTo(plpgsql_compile_tmp_cxt);
 -      (void) raw_parser(stmt);
 +      (void) raw_parser(stmt, NULL);
        MemoryContextSwitchTo(oldCxt);
  
        /* Restore former ereport callback */
Simple merge
index 44c323f18f45bd5c95dc66fb57cbbfd26ce94d5b,14646c6397c166c27999ee1c00df2853a84b6746..77bc871592428ca4ae1784276dab0e00314b2d2e
@@@ -586,10 -575,16 +586,16 @@@ select max(unique1) from tenk1 where un
   9999
  (1 row)
  
- explain (costs off, nodes off)
+ -- the planner may choose a generic aggregate here if parallel query is
+ -- enabled, since that plan will be parallel safe and the "optimized"
+ -- plan, which has almost identical cost, will not be.  we want to test
+ -- the optimized plan, so temporarily disable parallel query.
+ begin;
+ set local max_parallel_workers_per_gather = 0;
+ explain (costs off)
    select max(unique1) from tenk1 where unique1 > 42000;
 -                                QUERY PLAN                                 
 ----------------------------------------------------------------------------
 +                                      QUERY PLAN                                       
 +---------------------------------------------------------------------------------------
   Result
     InitPlan 1 (returns $0)
       ->  Limit
@@@ -605,11 -598,12 +611,12 @@@ select max(unique1) from tenk1 where un
      
  (1 row)
  
+ rollback;
  -- multi-column index (uses tenk1_thous_tenthous)
 -explain (costs off)
 +explain (costs off, nodes off)
    select max(tenthous) from tenk1 where thousand = 33;
 -                                 QUERY PLAN                                 
 -----------------------------------------------------------------------------
 +                                       QUERY PLAN                                       
 +----------------------------------------------------------------------------------------
   Result
     InitPlan 1 (returns $0)
       ->  Limit
@@@ -795,19 -790,34 +821,34 @@@ insert into minmaxtest values(11), (12)
  insert into minmaxtest1 values(13), (14);
  insert into minmaxtest2 values(15), (16);
  insert into minmaxtest3 values(17), (18);
 -explain (costs off)
 +explain (costs off, nodes off)
    select min(f1), max(f1) from minmaxtest;
-                    QUERY PLAN                    
- -------------------------------------------------
-  Aggregate
-    ->  Remote Subquery Scan on all
-          ->  Aggregate
-                ->  Append
-                      ->  Seq Scan on minmaxtest
-                      ->  Seq Scan on minmaxtest1
-                      ->  Seq Scan on minmaxtest2
-                      ->  Seq Scan on minmaxtest3
- (8 rows)
+                                           QUERY PLAN                                          
+ ----------------------------------------------------------------------------------------------
+  Result
+    InitPlan 1 (returns $0)
+      ->  Limit
+            ->  Merge Append
+                  Sort Key: minmaxtest.f1
+                  ->  Index Only Scan using minmaxtesti on minmaxtest
+                        Index Cond: (f1 IS NOT NULL)
+                  ->  Index Only Scan using minmaxtest1i on minmaxtest1
+                        Index Cond: (f1 IS NOT NULL)
+                  ->  Index Only Scan Backward using minmaxtest2i on minmaxtest2
+                        Index Cond: (f1 IS NOT NULL)
+                  ->  Index Only Scan using minmaxtest3i on minmaxtest3
+    InitPlan 2 (returns $1)
+      ->  Limit
+            ->  Merge Append
+                  Sort Key: minmaxtest_1.f1 DESC
+                  ->  Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_1
+                        Index Cond: (f1 IS NOT NULL)
+                  ->  Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest1_1
+                        Index Cond: (f1 IS NOT NULL)
+                  ->  Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest2_1
+                        Index Cond: (f1 IS NOT NULL)
+                  ->  Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest3_1
+ (23 rows)
  
  select min(f1), max(f1) from minmaxtest;
   min | max 
@@@ -1577,54 -1667,207 +1698,259 @@@ select least_agg(variadic array[q1,q2]
   -4567890123456789
  (1 row)
  
 +-- int8 aggregates for distributed tables
 +CREATE TABLE int8_tbl_aggtest AS SELECT * FROM int8_tbl;
 +SELECT avg(q1) FROM int8_tbl_aggtest;
 +          avg          
 +-----------------------
 + 2740734074074122.6000
 +(1 row)
 +
 +SELECT sum(q1) FROM int8_tbl_aggtest;
 +        sum        
 +-------------------
 + 13703670370370613
 +(1 row)
 +
 +SELECT max(q1) FROM int8_tbl_aggtest;
 +       max        
 +------------------
 + 4567890123456789
 +(1 row)
 +
 +SELECT min(q1) FROM int8_tbl_aggtest;
 + min 
 +-----
 + 123
 +(1 row)
 +
 +SELECT stddev_pop(q1) FROM int8_tbl_aggtest;
 +    stddev_pop    
 +------------------
 + 2237800000713538
 +(1 row)
 +
 +SELECT stddev_samp(q1) FROM int8_tbl_aggtest;
 +   stddev_samp    
 +------------------
 + 2501936460822274
 +(1 row)
 +
 +SELECT var_pop(q1) FROM int8_tbl_aggtest;
 +             var_pop             
 +---------------------------------
 + 5007748843193509284246811160533
 +(1 row)
 +
 +SELECT var_samp(q1) FROM int8_tbl_aggtest;
 +            var_samp             
 +---------------------------------
 + 6259686053991886605308513950667
 +(1 row)
 +
 +DROP TABLE int8_tbl_aggtest;
++
+ -- test aggregates with common transition functions share the same states
+ begin work;
+ create type avg_state as (total bigint, count bigint);
+ create or replace function avg_transfn(state avg_state, n int) returns avg_state as
+ $$
+ declare new_state avg_state;
+ begin
+       raise notice 'avg_transfn called with %', n;
+       if state is null then
+               if n is not null then
+                       new_state.total := n;
+                       new_state.count := 1;
+                       return new_state;
+               end if;
+               return null;
+       elsif n is not null then
+               state.total := state.total + n;
+               state.count := state.count + 1;
+               return state;
+       end if;
+       return null;
+ end
+ $$ language plpgsql;
+ create function avg_finalfn(state avg_state) returns int4 as
+ $$
+ begin
+       if state is null then
+               return NULL;
+       else
+               return state.total / state.count;
+       end if;
+ end
+ $$ language plpgsql;
+ create function sum_finalfn(state avg_state) returns int4 as
+ $$
+ begin
+       if state is null then
+               return NULL;
+       else
+               return state.total;
+       end if;
+ end
+ $$ language plpgsql;
+ create aggregate my_avg(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = avg_finalfn
+ );
+ create aggregate my_sum(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = sum_finalfn
+ );
+ -- aggregate state should be shared as aggs are the same.
+ select my_avg(one),my_avg(one) from (values(1),(3)) t(one);
+ NOTICE:  avg_transfn called with 1
+ NOTICE:  avg_transfn called with 3
+  my_avg | my_avg 
+ --------+--------
+       2 |      2
+ (1 row)
+ -- aggregate state should be shared as transfn is the same for both aggs.
+ select my_avg(one),my_sum(one) from (values(1),(3)) t(one);
+ NOTICE:  avg_transfn called with 1
+ NOTICE:  avg_transfn called with 3
+  my_avg | my_sum 
+ --------+--------
+       2 |      4
+ (1 row)
+ -- shouldn't share states due to the distinctness not matching.
+ select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one);
+ NOTICE:  avg_transfn called with 1
+ NOTICE:  avg_transfn called with 3
+ NOTICE:  avg_transfn called with 1
+ NOTICE:  avg_transfn called with 3
+  my_avg | my_sum 
+ --------+--------
+       2 |      4
+ (1 row)
+ -- shouldn't share states due to the filter clause not matching.
+ select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one);
+ NOTICE:  avg_transfn called with 1
+ NOTICE:  avg_transfn called with 3
+ NOTICE:  avg_transfn called with 3
+  my_avg | my_sum 
+ --------+--------
+       3 |      4
+ (1 row)
+ -- this should not share the state due to different input columns.
+ select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two);
+ NOTICE:  avg_transfn called with 2
+ NOTICE:  avg_transfn called with 1
+ NOTICE:  avg_transfn called with 4
+ NOTICE:  avg_transfn called with 3
+  my_avg | my_sum 
+ --------+--------
+       2 |      6
+ (1 row)
+ -- test that aggs with the same sfunc and initcond share the same agg state
+ create aggregate my_sum_init(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = sum_finalfn,
+    initcond = '(10,0)'
+ );
+ create aggregate my_avg_init(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = avg_finalfn,
+    initcond = '(10,0)'
+ );
+ create aggregate my_avg_init2(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = avg_finalfn,
+    initcond = '(4,0)'
+ );
+ -- state should be shared if INITCONDs are matching
+ select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one);
+ NOTICE:  avg_transfn called with 1
+ NOTICE:  avg_transfn called with 3
+  my_sum_init | my_avg_init 
+ -------------+-------------
+           14 |           7
+ (1 row)
+ -- Varying INITCONDs should cause the states not to be shared.
+ select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one);
+ NOTICE:  avg_transfn called with 1
+ NOTICE:  avg_transfn called with 1
+ NOTICE:  avg_transfn called with 3
+ NOTICE:  avg_transfn called with 3
+  my_sum_init | my_avg_init2 
+ -------------+--------------
+           14 |            4
+ (1 row)
+ rollback;
+ -- test aggregate state sharing to ensure it works if one aggregate has a
+ -- finalfn and the other one has none.
+ begin work;
+ create or replace function sum_transfn(state int4, n int4) returns int4 as
+ $$
+ declare new_state int4;
+ begin
+       raise notice 'sum_transfn called with %', n;
+       if state is null then
+               if n is not null then
+                       new_state := n;
+                       return new_state;
+               end if;
+               return null;
+       elsif n is not null then
+               state := state + n;
+               return state;
+       end if;
+       return null;
+ end
+ $$ language plpgsql;
+ create function halfsum_finalfn(state int4) returns int4 as
+ $$
+ begin
+       if state is null then
+               return NULL;
+       else
+               return state / 2;
+       end if;
+ end
+ $$ language plpgsql;
+ create aggregate my_sum(int4)
+ (
+    stype = int4,
+    sfunc = sum_transfn
+ );
+ create aggregate my_half_sum(int4)
+ (
+    stype = int4,
+    sfunc = sum_transfn,
+    finalfunc = halfsum_finalfn
+ );
+ -- Agg state should be shared even though my_sum has no finalfn
+ select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one);
+ NOTICE:  sum_transfn called with 1
+ NOTICE:  sum_transfn called with 2
+ NOTICE:  sum_transfn called with 3
+ NOTICE:  sum_transfn called with 4
+  my_sum | my_half_sum 
+ --------+-------------
+      10 |           5
+ (1 row)
+ rollback;
index 420aae39767a276886d2a27372d3ea794657100f,3232cda02366e2e46fc8ada0ff4cd2ec68f4a2cb..ee07e0b0f092b3cb4041a0554f82cff6e5d05671
@@@ -427,19 -435,31 +436,32 @@@ explain (costs off, nodes off) select 
  
  -- after validation, the constraint should be used
  alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check;
 -explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
 -                                QUERY PLAN                                 
 ----------------------------------------------------------------------------
 - Append
 -   ->  Seq Scan on nv_parent
 -         Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
 -   ->  Seq Scan on nv_child_2010
 -         Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
 -   ->  Seq Scan on nv_child_2009
 -         Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
 -(7 rows)
 +explain (costs off, nodes off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
 +                                   QUERY PLAN                                    
 +---------------------------------------------------------------------------------
 + Remote Subquery Scan on all
 +   ->  Append
 +         ->  Seq Scan on nv_parent
 +               Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
 +         ->  Seq Scan on nv_child_2010
 +               Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
 +         ->  Seq Scan on nv_child_2009
 +               Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
 +(8 rows)
  
+ -- add an inherited NOT VALID constraint
+ alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid;
+ \d nv_child_2009
+ Table "public.nv_child_2009"
+  Column | Type | Modifiers 
+ --------+------+-----------
+  d      | date | 
+ Check constraints:
+     "nv_child_2009_d_check" CHECK (d >= '01-01-2009'::date AND d <= '12-31-2009'::date)
+     "nv_parent_d_check" CHECK (d >= '01-01-2001'::date AND d <= '12-31-2099'::date) NOT VALID
+ Inherits: nv_parent
+ -- we leave nv_parent and children around to help test pg_dump logic
  -- Foreign key adding test with mixed types
  -- Note: these tables are TEMP to avoid name conflicts when this test
  -- is run in parallel with foreign_key.sql.
@@@ -1343,11 -1388,11 +1386,11 @@@ select f1 from c1
  ERROR:  column "f1" does not exist
  LINE 1: select f1 from c1;
                 ^
- HINT:  Perhaps you meant to reference the column "c1"."f2".
+ HINT:  Perhaps you meant to reference the column "c1.f2".
  drop table p1 cascade;
  NOTICE:  drop cascades to table c1
 -create table p1 (f1 int, f2 int);
 -create table c1 () inherits(p1);
 +create table p1 (f1 int, f2 int) distribute by roundrobin;
 +create table c1 () inherits(p1) distribute by roundrobin;
  -- should be rejected since c1.f1 is inherited
  alter table c1 drop column f1;
  ERROR:  cannot drop inherited column "f1"
@@@ -1357,11 -1402,11 +1400,11 @@@ select f1 from c1
  ERROR:  column "f1" does not exist
  LINE 1: select f1 from c1;
                 ^
- HINT:  Perhaps you meant to reference the column "c1"."f2".
+ HINT:  Perhaps you meant to reference the column "c1.f2".
  drop table p1 cascade;
  NOTICE:  drop cascades to table c1
 -create table p1 (f1 int, f2 int);
 -create table c1 () inherits(p1);
 +create table p1 (f1 int, f2 int) distribute by roundrobin;
 +create table c1 () inherits(p1) distribute by roundrobin;
  -- should be rejected since c1.f1 is inherited
  alter table c1 drop column f1;
  ERROR:  cannot drop inherited column "f1"
@@@ -1985,10 -2176,57 +2165,51 @@@ ERROR:  current transaction is aborted
  commit;
  begin;
  alter table alterlock2 validate constraint alterlock2nv;
 +ERROR:  constraint "alterlock2nv" of relation "alterlock2" does not exist
  select * from my_locks order by 1;
 -     relname     |       max_lockmode       
 ------------------+--------------------------
 - alterlock       | RowShareLock
 - alterlock2      | ShareUpdateExclusiveLock
 - alterlock2_pkey | AccessShareLock
 - alterlock_pkey  | AccessShareLock
 -(4 rows)
 -
 +ERROR:  current transaction is aborted, commands ignored until end of transaction block
  rollback;
+ create or replace view my_locks as
+ select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode
+ from pg_locks l join pg_class c on l.relation = c.oid
+ where virtualtransaction = (
+         select virtualtransaction
+         from pg_locks
+         where transactionid = txid_current()::integer)
+ and locktype = 'relation'
+ and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog')
+ and c.relname = 'my_locks'
+ group by c.relname;
+ -- raise exception
+ alter table my_locks set (autovacuum_enabled = false);
+ ERROR:  unrecognized parameter "autovacuum_enabled"
+ alter view my_locks set (autovacuum_enabled = false);
+ ERROR:  unrecognized parameter "autovacuum_enabled"
+ alter table my_locks reset (autovacuum_enabled);
+ alter view my_locks reset (autovacuum_enabled);
+ begin;
+ alter view my_locks set (security_barrier=off);
+ select * from my_locks order by 1;
+  relname  |    max_lockmode     
+ ----------+---------------------
+  my_locks | AccessExclusiveLock
+ (1 row)
+ alter view my_locks reset (security_barrier);
+ rollback;
+ -- this test intentionally applies the ALTER TABLE command against a view, but
+ -- uses a view option so we expect this to succeed. This form of SQL is
+ -- accepted for historical reasons, as shown in the docs for ALTER VIEW
+ begin;
+ alter table my_locks set (security_barrier=off);
+ select * from my_locks order by 1;
+  relname  |    max_lockmode     
+ ----------+---------------------
+  my_locks | AccessExclusiveLock
+ (1 row)
+ alter table my_locks reset (security_barrier);
+ rollback;
  -- cleanup
  drop table alterlock2;
  drop table alterlock;
@@@ -2500,12 -2799,11 +2785,13 @@@ ORDER BY relname
  (5 rows)
  
  CREATE TABLE logged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged1); -- foreign key
 +ERROR:  Hash/Modulo distribution column does not refer to hash/modulo distribution column in referenced table.
  CREATE TABLE logged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged3); -- self-referencing foreign key
  ALTER TABLE logged1 SET UNLOGGED; -- fails because a foreign key from a permanent table exists
+ ERROR:  could not change table "logged1" to unlogged because it references logged table "logged2"
  ALTER TABLE logged3 SET UNLOGGED; -- skip self-referencing foreign key
  ALTER TABLE logged2 SET UNLOGGED;
 +ERROR:  relation "logged2" does not exist
  ALTER TABLE logged1 SET UNLOGGED;
  -- check relpersistence of a permanent table after changing to unlogged
  SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1'
@@@ -2526,5 -2824,93 +2812,94 @@@ ORDER BY relname
  ALTER TABLE logged1 SET UNLOGGED; -- silently do nothing
  DROP TABLE logged3;
  DROP TABLE logged2;
 +ERROR:  table "logged2" does not exist
  DROP TABLE logged1;
+ -- test ADD COLUMN IF NOT EXISTS
+ CREATE TABLE test_add_column(c1 integer);
+ \d test_add_column
+ Table "public.test_add_column"
+  Column |  Type   | Modifiers 
+ --------+---------+-----------
+  c1     | integer | 
+ ALTER TABLE test_add_column
+       ADD COLUMN c2 integer;
+ \d test_add_column
+ Table "public.test_add_column"
+  Column |  Type   | Modifiers 
+ --------+---------+-----------
+  c1     | integer | 
+  c2     | integer | 
+ ALTER TABLE test_add_column
+       ADD COLUMN c2 integer; -- fail because c2 already exists
+ ERROR:  column "c2" of relation "test_add_column" already exists
+ \d test_add_column
+ Table "public.test_add_column"
+  Column |  Type   | Modifiers 
+ --------+---------+-----------
+  c1     | integer | 
+  c2     | integer | 
+ ALTER TABLE test_add_column
+       ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
+ NOTICE:  column "c2" of relation "test_add_column" already exists, skipping
+ \d test_add_column
+ Table "public.test_add_column"
+  Column |  Type   | Modifiers 
+ --------+---------+-----------
+  c1     | integer | 
+  c2     | integer | 
+ ALTER TABLE test_add_column
+       ADD COLUMN c2 integer, -- fail because c2 already exists
+       ADD COLUMN c3 integer;
+ ERROR:  column "c2" of relation "test_add_column" already exists
+ \d test_add_column
+ Table "public.test_add_column"
+  Column |  Type   | Modifiers 
+ --------+---------+-----------
+  c1     | integer | 
+  c2     | integer | 
+ ALTER TABLE test_add_column
+       ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+       ADD COLUMN c3 integer; -- fail because c3 already exists
+ NOTICE:  column "c2" of relation "test_add_column" already exists, skipping
+ \d test_add_column
+ Table "public.test_add_column"
+  Column |  Type   | Modifiers 
+ --------+---------+-----------
+  c1     | integer | 
+  c2     | integer | 
+  c3     | integer | 
+ ALTER TABLE test_add_column
+       ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+       ADD COLUMN IF NOT EXISTS c3 integer; -- skipping because c3 already exists
+ NOTICE:  column "c2" of relation "test_add_column" already exists, skipping
+ NOTICE:  column "c3" of relation "test_add_column" already exists, skipping
+ \d test_add_column
+ Table "public.test_add_column"
+  Column |  Type   | Modifiers 
+ --------+---------+-----------
+  c1     | integer | 
+  c2     | integer | 
+  c3     | integer | 
+ ALTER TABLE test_add_column
+       ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+       ADD COLUMN IF NOT EXISTS c3 integer, -- skipping because c3 already exists
+       ADD COLUMN c4 integer;
+ NOTICE:  column "c2" of relation "test_add_column" already exists, skipping
+ NOTICE:  column "c3" of relation "test_add_column" already exists, skipping
+ \d test_add_column
+ Table "public.test_add_column"
+  Column |  Type   | Modifiers 
+ --------+---------+-----------
+  c1     | integer | 
+  c2     | integer | 
+  c3     | integer | 
+  c4     | integer | 
+ DROP TABLE test_add_column;
index 0a01526cd75a4966ff7f7acff64e7b81b76b4f7d,baccca14afdf13cacef79b25998c90f8b3475b42..53cea0e4264da6f810a22f69f794e51e66bfd598
@@@ -131,14 -125,24 +131,24 @@@ SELECT a[1:3]
   {16,25,23} | {}                    | {foobar,new_word} | {{elt2}}
  (3 rows)
  
+ SELECT b[1:1][2][2],
+        d[1:1][2]
+    FROM arrtest;
+            b           |       d       
+ -----------------------+---------------
+  {{{113,142},{1,147}}} | {}
+  {}                    | {}
+  {}                    | {{elt1,elt2}}
+ (3 rows)
  INSERT INTO arrtest(a) VALUES('{1,null,3}');
 -SELECT a FROM arrtest;
 +SELECT a FROM arrtest ORDER BY 1;
         a       
  ---------------
 - {16,25,3,4,5}
   {}
 - {16,25,23}
   {1,NULL,3}
 + {16,25,3,4,5}
 + {16,25,23}
  (4 rows)
  
  UPDATE arrtest SET a[4] = NULL WHERE a[2] IS NULL;
Simple merge
Simple merge
index eef7f29196ec4509b4f72f2447ba97807fc8b153,35b6476e501570ee3a273994362eccc30ea8ae97..8ff5abfa05639b1231753168b6c19de4395e0838
@@@ -295,15 -287,54 +295,54 @@@ UPDATE CASE_TB
                  ELSE (3 * j) END
    FROM CASE2_TBL b
    WHERE j = -CASE_TBL.i;
 -SELECT * FROM CASE_TBL;
 +SELECT * FROM CASE_TBL ORDER BY i, f;
    i  |   f   
  -----+-------
 -   8 |  20.2
 -  -9 | -30.3
   -12 |      
 +  -9 | -30.3
    -8 |  10.1
 +   8 |  20.2
  (4 rows)
  
+ --
+ -- Nested CASE expressions
+ --
+ -- This test exercises a bug caused by aliasing econtext->caseValue_isNull
+ -- with the isNull argument of the inner CASE's ExecEvalCase() call.  After
+ -- evaluating the vol(null) expression in the inner CASE's second WHEN-clause,
+ -- the isNull flag for the case test value incorrectly became true, causing
+ -- the third WHEN-clause not to match.  The volatile function calls are needed
+ -- to prevent constant-folding in the planner, which would hide the bug.
+ CREATE FUNCTION vol(text) returns text as
+   'begin return $1; end' language plpgsql volatile;
+ SELECT CASE
+   (CASE vol('bar')
+     WHEN 'foo' THEN 'it was foo!'
+     WHEN vol(null) THEN 'null input'
+     WHEN 'bar' THEN 'it was bar!' END
+   )
+   WHEN 'it was foo!' THEN 'foo recognized'
+   WHEN 'it was bar!' THEN 'bar recognized'
+   ELSE 'unrecognized' END;
+       case      
+ ----------------
+  bar recognized
+ (1 row)
+ -- In this case, we can't inline the SQL function without confusing things.
+ CREATE DOMAIN foodomain AS text;
+ CREATE FUNCTION volfoo(text) returns foodomain as
+   'begin return $1::foodomain; end' language plpgsql volatile;
+ CREATE FUNCTION inline_eq(foodomain, foodomain) returns boolean as
+   'SELECT CASE $2::text WHEN $1::text THEN true ELSE false END' language sql;
+ CREATE OPERATOR = (procedure = inline_eq,
+                    leftarg = foodomain, rightarg = foodomain);
+ SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' END;
+     case    
+ ------------
+  is not foo
+ (1 row)
  --
  -- Clean up
  --
Simple merge
Simple merge
index 400e5bd60bb7c13711584e4c563f1a5126d42158,97edde17cf7fe6c622d72b3241eb177aee245025..4c60b36f61e55ce9faef16a0f0919d5d765811b1
@@@ -244,5 -228,30 +244,32 @@@ DROP TYPE ctlty1
  DROP VIEW ctlv1;
  DROP TABLE IF EXISTS ctlt4, ctlt10, ctlt11, ctlt11a, ctlt12;
  NOTICE:  table "ctlt10" does not exist, skipping
 +NOTICE:  table "ctlt11" does not exist, skipping
 +NOTICE:  table "ctlt11a" does not exist, skipping
+ /* LIKE WITH OIDS */
+ CREATE TABLE has_oid (x INTEGER) WITH OIDS;
+ CREATE TABLE no_oid (y INTEGER);
+ CREATE TABLE like_test (z INTEGER, LIKE has_oid);
+ SELECT oid FROM like_test;
+  oid 
+ -----
+ (0 rows)
+ CREATE TABLE like_test2 (z INTEGER, LIKE no_oid);
+ SELECT oid FROM like_test2; -- fail
+ ERROR:  column "oid" does not exist
+ LINE 1: SELECT oid FROM like_test2;
+                ^
+ CREATE TABLE like_test3 (z INTEGER, LIKE has_oid, LIKE no_oid);
+ SELECT oid FROM like_test3;
+  oid 
+ -----
+ (0 rows)
+ CREATE TABLE like_test4 (z INTEGER, PRIMARY KEY(oid), LIKE has_oid);
+ SELECT oid FROM like_test4;
+  oid 
+ -----
+ (0 rows)
+ DROP TABLE has_oid, no_oid, like_test, like_test2, like_test3, like_test4;
Simple merge
Simple merge
index 0402d989cb3cf059a9e9253514c00ab77fa19229,e12455201e976ceb445965ae6a71363252a89cdc..314c88ffeb878ef086120ed43f9dc7311f76eb91
@@@ -29,17 -28,16 +29,17 @@@ ERROR:  EVENT TRIGGER not yet supporte
  -- OK
  create event trigger regress_event_trigger_end on ddl_command_end
     execute procedure test_event_trigger();
 +ERROR:  EVENT TRIGGER not yet supported in Postgres-XL
  -- should fail, food is not a valid filter variable
  create event trigger regress_event_trigger2 on ddl_command_start
-    when food in ('sandwhich')
+    when food in ('sandwich')
     execute procedure test_event_trigger();
 -ERROR:  unrecognized filter variable "food"
 --- should fail, sandwich is not a valid command tag
 +ERROR:  EVENT TRIGGER not yet supported in Postgres-XL
 +-- should fail, sandwhich is not a valid command tag
  create event trigger regress_event_trigger2 on ddl_command_start
-    when tag in ('sandwhich')
+    when tag in ('sandwich')
     execute procedure test_event_trigger();
 -ERROR:  filter value "sandwich" not recognized for filter variable "tag"
 +ERROR:  EVENT TRIGGER not yet supported in Postgres-XL
  -- should fail, create skunkcabbage is not a valid command tag
  create event trigger regress_event_trigger2 on ddl_command_start
     when tag in ('create table', 'create skunkcabbage')
@@@ -88,11 -84,12 +88,11 @@@ ERROR:  event trigger "regress_event_tr
  comment on event trigger wrong.regress_event_trigger is 'test comment';
  ERROR:  event trigger name cannot be qualified
  -- drop as non-superuser should fail
- create role regression_bob;
- set role regression_bob;
+ create role regress_evt_user;
+ set role regress_evt_user;
  create event trigger regress_event_trigger_noperms on ddl_command_start
     execute procedure test_event_trigger();
 -ERROR:  permission denied to create event trigger "regress_event_trigger_noperms"
 -HINT:  Must be superuser to create an event trigger.
 +ERROR:  EVENT TRIGGER not yet supported in Postgres-XL
  reset role;
  -- all OK
  alter event trigger regress_event_trigger enable replica;
@@@ -149,15 -146,13 +151,15 @@@ NOTICE:  event trigger "regress_event_t
  drop event trigger if exists regress_event_trigger2;
  NOTICE:  event trigger "regress_event_trigger2" does not exist, skipping
  drop event trigger regress_event_trigger3;
 +ERROR:  event trigger "regress_event_trigger3" does not exist
  drop event trigger regress_event_trigger_end;
 +ERROR:  event trigger "regress_event_trigger_end" does not exist
  -- test support for dropped objects
- CREATE SCHEMA schema_one authorization regression_bob;
- CREATE SCHEMA schema_two authorization regression_bob;
- CREATE SCHEMA audit_tbls authorization regression_bob;
+ CREATE SCHEMA schema_one authorization regress_evt_user;
+ CREATE SCHEMA schema_two authorization regress_evt_user;
+ CREATE SCHEMA audit_tbls authorization regress_evt_user;
  CREATE TEMP TABLE a_temp_tbl ();
- SET SESSION AUTHORIZATION regression_bob;
+ SET SESSION AUTHORIZATION regress_evt_user;
  CREATE TABLE schema_one.table_one(a int);
  CREATE TABLE schema_one."table two"(a int);
  CREATE TABLE schema_one.table_three(a int);
@@@ -241,28 -233,85 +243,61 @@@ drop cascades to function schema_two.ne
  drop cascades to table schema_one.table_one
  drop cascades to table schema_one."table two"
  drop cascades to table schema_one.table_three
+ NOTICE:  table "schema_two_table_two" does not exist, skipping
+ NOTICE:  table "audit_tbls_schema_two_table_three" does not exist, skipping
+ ERROR:  object audit_tbls.schema_two_table_three of type table cannot be dropped
+ CONTEXT:  PL/pgSQL function undroppable() line 14 at RAISE
+ SQL statement "DROP TABLE IF EXISTS audit_tbls.schema_two_table_three"
+ PL/pgSQL function test_evtrig_dropped_objects() line 8 at EXECUTE
  DELETE FROM undroppable_objs WHERE object_identity = 'audit_tbls.schema_two_table_three';
  DROP SCHEMA schema_one, schema_two CASCADE;
- ERROR:  schema "schema_one" does not exist
+ NOTICE:  drop cascades to 7 other objects
+ DETAIL:  drop cascades to table schema_two.table_two
+ drop cascades to table schema_two.table_three
+ drop cascades to function schema_two.add(integer,integer)
+ drop cascades to function schema_two.newton(integer)
+ drop cascades to table schema_one.table_one
+ drop cascades to table schema_one."table two"
+ drop cascades to table schema_one.table_three
+ NOTICE:  table "schema_two_table_two" does not exist, skipping
+ NOTICE:  table "audit_tbls_schema_two_table_three" does not exist, skipping
+ NOTICE:  table "schema_one_table_one" does not exist, skipping
+ NOTICE:  table "schema_one_table two" does not exist, skipping
+ NOTICE:  table "schema_one_table_three" does not exist, skipping
+ ERROR:  object schema_one.table_three of type table cannot be dropped
+ CONTEXT:  PL/pgSQL function undroppable() line 14 at RAISE
  DELETE FROM undroppable_objs WHERE object_identity = 'schema_one.table_three';
  DROP SCHEMA schema_one, schema_two CASCADE;
- ERROR:  schema "schema_one" does not exist
+ NOTICE:  drop cascades to 7 other objects
+ DETAIL:  drop cascades to table schema_two.table_two
+ drop cascades to table schema_two.table_three
+ drop cascades to function schema_two.add(integer,integer)
+ drop cascades to function schema_two.newton(integer)
+ drop cascades to table schema_one.table_one
+ drop cascades to table schema_one."table two"
+ drop cascades to table schema_one.table_three
+ NOTICE:  table "schema_two_table_two" does not exist, skipping
+ NOTICE:  table "audit_tbls_schema_two_table_three" does not exist, skipping
+ NOTICE:  table "schema_one_table_one" does not exist, skipping
+ NOTICE:  table "schema_one_table two" does not exist, skipping
+ NOTICE:  table "schema_one_table_three" does not exist, skipping
  SELECT * FROM dropped_objects WHERE schema IS NULL OR schema <> 'pg_toast';
 -     type     |   schema   |               object                
 ---------------+------------+-------------------------------------
 - table column | schema_one | schema_one.table_one.a
 - schema       |            | schema_two
 - table        | schema_two | schema_two.table_two
 - type         | schema_two | schema_two.table_two
 - type         | schema_two | schema_two.table_two[]
 - table        | audit_tbls | audit_tbls.schema_two_table_three
 - type         | audit_tbls | audit_tbls.schema_two_table_three
 - type         | audit_tbls | audit_tbls.schema_two_table_three[]
 - table        | schema_two | schema_two.table_three
 - type         | schema_two | schema_two.table_three
 - type         | schema_two | schema_two.table_three[]
 - function     | schema_two | schema_two.add(integer,integer)
 - aggregate    | schema_two | schema_two.newton(integer)
 - schema       |            | schema_one
 - table        | schema_one | schema_one.table_one
 - type         | schema_one | schema_one.table_one
 - type         | schema_one | schema_one.table_one[]
 - table        | schema_one | schema_one."table two"
 - type         | schema_one | schema_one."table two"
 - type         | schema_one | schema_one."table two"[]
 - table        | schema_one | schema_one.table_three
 - type         | schema_one | schema_one.table_three
 - type         | schema_one | schema_one.table_three[]
 -(23 rows)
 + type | schema | object 
 +------+--------+--------
 +(0 rows)
  
- DROP OWNED BY regression_bob;
+ DROP OWNED BY regress_evt_user;
+ NOTICE:  schema "audit_tbls" does not exist, skipping
  SELECT * FROM dropped_objects WHERE type = 'schema';
 -  type  | schema |   object   
 ---------+--------+------------
 - schema |        | schema_two
 - schema |        | schema_one
 - schema |        | audit_tbls
 -(3 rows)
 + type | schema | object 
 +------+--------+--------
 +(0 rows)
  
- DROP ROLE regression_bob;
+ DROP ROLE regress_evt_user;
  DROP EVENT TRIGGER regress_event_trigger_drop_objects;
 +ERROR:  event trigger "regress_event_trigger_drop_objects" does not exist
  DROP EVENT TRIGGER undroppable;
 +ERROR:  event trigger "undroppable" does not exist
  CREATE OR REPLACE FUNCTION event_trigger_report_dropped()
   RETURNS event_trigger
   LANGUAGE plpgsql
index 4bc40e7db913440978eeca23a5d3291c10e2913a,20c985e5df86ee6bcf603f45bec4df2947e1fad0..061ff87813a9d62eb889744a88de10d4241917d3
@@@ -435,13 -434,117 +435,117 @@@ INSERT INTO FLOAT8_TBL(f1) VALUES ('-34
  INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30');
  INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200');
  INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
 -SELECT '' AS five, * FROM FLOAT8_TBL;
 +SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1;
   five |          f1           
  ------+-----------------------
 -      |                     0
 -      |                -34.84
 -      |               -1004.3
        | -1.2345678901234e+200
 +      |               -1004.3
 +      |                -34.84
        | -1.2345678901234e-200
 +      |                     0
  (5 rows)
  
+ -- test exact cases for trigonometric functions in degrees
+ SET extra_float_digits = 3;
+ SELECT x,
+        sind(x),
+        sind(x) IN (-1,-0.5,0,0.5,1) AS sind_exact
+ FROM (VALUES (0), (30), (90), (150), (180),
+       (210), (270), (330), (360)) AS t(x);
+   x  | sind | sind_exact 
+ -----+------+------------
+    0 |    0 | t
+   30 |  0.5 | t
+   90 |    1 | t
+  150 |  0.5 | t
+  180 |    0 | t
+  210 | -0.5 | t
+  270 |   -1 | t
+  330 | -0.5 | t
+  360 |    0 | t
+ (9 rows)
+ SELECT x,
+        cosd(x),
+        cosd(x) IN (-1,-0.5,0,0.5,1) AS cosd_exact
+ FROM (VALUES (0), (60), (90), (120), (180),
+       (240), (270), (300), (360)) AS t(x);
+   x  | cosd | cosd_exact 
+ -----+------+------------
+    0 |    1 | t
+   60 |  0.5 | t
+   90 |    0 | t
+  120 | -0.5 | t
+  180 |   -1 | t
+  240 | -0.5 | t
+  270 |    0 | t
+  300 |  0.5 | t
+  360 |    1 | t
+ (9 rows)
+ SELECT x,
+        tand(x),
+        tand(x) IN ('-Infinity'::float8,-1,0,
+                    1,'Infinity'::float8) AS tand_exact,
+        cotd(x),
+        cotd(x) IN ('-Infinity'::float8,-1,0,
+                    1,'Infinity'::float8) AS cotd_exact
+ FROM (VALUES (0), (45), (90), (135), (180),
+       (225), (270), (315), (360)) AS t(x);
+   x  |   tand    | tand_exact |   cotd    | cotd_exact 
+ -----+-----------+------------+-----------+------------
+    0 |         0 | t          |  Infinity | t
+   45 |         1 | t          |         1 | t
+   90 |  Infinity | t          |         0 | t
+  135 |        -1 | t          |        -1 | t
+  180 |         0 | t          | -Infinity | t
+  225 |         1 | t          |         1 | t
+  270 | -Infinity | t          |         0 | t
+  315 |        -1 | t          |        -1 | t
+  360 |         0 | t          |  Infinity | t
+ (9 rows)
+ SELECT x,
+        asind(x),
+        asind(x) IN (-90,-30,0,30,90) AS asind_exact,
+        acosd(x),
+        acosd(x) IN (0,60,90,120,180) AS acosd_exact
+ FROM (VALUES (-1), (-0.5), (0), (0.5), (1)) AS t(x);
+   x   | asind | asind_exact | acosd | acosd_exact 
+ ------+-------+-------------+-------+-------------
+    -1 |   -90 | t           |   180 | t
+  -0.5 |   -30 | t           |   120 | t
+     0 |     0 | t           |    90 | t
+   0.5 |    30 | t           |    60 | t
+     1 |    90 | t           |     0 | t
+ (5 rows)
+ SELECT x,
+        atand(x),
+        atand(x) IN (-90,-45,0,45,90) AS atand_exact
+ FROM (VALUES ('-Infinity'::float8), (-1), (0), (1),
+       ('Infinity'::float8)) AS t(x);
+      x     | atand | atand_exact 
+ -----------+-------+-------------
+  -Infinity |   -90 | t
+         -1 |   -45 | t
+          0 |     0 | t
+          1 |    45 | t
+   Infinity |    90 | t
+ (5 rows)
+ SELECT x, y,
+        atan2d(y, x),
+        atan2d(y, x) IN (-90,0,90,180) AS atan2d_exact
+ FROM (SELECT 10*cosd(a), 10*sind(a)
+       FROM generate_series(0, 360, 90) AS t(a)) AS t(x,y);
+   x  |  y  | atan2d | atan2d_exact 
+ -----+-----+--------+--------------
+   10 |   0 |      0 | t
+    0 |  10 |     90 | t
+  -10 |   0 |    180 | t
+    0 | -10 |    -90 | t
+   10 |   0 |      0 | t
+ (5 rows)
+ RESET extra_float_digits;
index 3824dd6ac0d43d253b346de8b0762609869e6c20,d6c1900c32aeda972aa34c734358613cc45d4171..e77ce00204ae6571ed54f12e23a502630708b456
@@@ -12,20 -12,17 +12,20 @@@ CREATE ROLE regress_test_role
  CREATE ROLE regress_test_role2;
  CREATE ROLE regress_test_role_super SUPERUSER;
  CREATE ROLE regress_test_indirect;
- CREATE ROLE unprivileged_role;
+ CREATE ROLE regress_unprivileged_role;
  CREATE FOREIGN DATA WRAPPER dummy;
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless';
 +ERROR:  foreign-data wrapper "dummy" does not exist
  CREATE FOREIGN DATA WRAPPER postgresql VALIDATOR postgresql_fdw_validator;
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  -- At this point we should have 2 built-in wrappers and no servers.
  SELECT fdwname, fdwhandler::regproc, fdwvalidator::regproc, fdwoptions FROM pg_foreign_data_wrapper ORDER BY 1, 2, 3;
 -  fdwname   | fdwhandler |       fdwvalidator       | fdwoptions 
 -------------+------------+--------------------------+------------
 - dummy      | -          | -                        | 
 - postgresql | -          | postgresql_fdw_validator | 
 -(2 rows)
 + fdwname | fdwhandler | fdwvalidator | fdwoptions 
 +---------+------------+--------------+------------
 +(0 rows)
  
  SELECT srvname, srvoptions FROM pg_foreign_server;
   srvname | srvoptions 
@@@ -39,60 -36,58 +39,60 @@@ SELECT * FROM pg_user_mapping
  
  -- CREATE FOREIGN DATA WRAPPER
  CREATE FOREIGN DATA WRAPPER foo VALIDATOR bar;            -- ERROR
 -ERROR:  function bar(text[], oid) does not exist
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  CREATE FOREIGN DATA WRAPPER foo;
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  \dew
-    List of foreign-data wrappers
-  Name | Owner | Handler | Validator 
- ------+-------+---------+-----------
+                         List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         
+ ------------+---------------------------+---------+--------------------------
 - dummy      | regress_foreign_data_user | -       | -
 - foo        | regress_foreign_data_user | -       | -
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator
 -(3 rows)
 +(0 rows)
  
  CREATE FOREIGN DATA WRAPPER foo; -- duplicate
 -ERROR:  foreign-data wrapper "foo" already exists
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  DROP FOREIGN DATA WRAPPER foo;
 +ERROR:  foreign-data wrapper "foo" does not exist
  CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1');
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
+                                                  List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         | Access privileges |  FDW Options  | Description 
+ ------------+---------------------------+---------+--------------------------+-------------------+---------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |               | useless
 - foo        | regress_foreign_data_user | -       | -                        |                   | (testing '1') | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |               | 
 -(3 rows)
 +(0 rows)
  
  DROP FOREIGN DATA WRAPPER foo;
 +ERROR:  foreign-data wrapper "foo" does not exist
  CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', testing '2');   -- ERROR
 -ERROR:  option "testing" provided more than once
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', another '2');
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
+                                                        List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         | Access privileges |        FDW Options         | Description 
+ ------------+---------------------------+---------+--------------------------+-------------------+----------------------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |                            | useless
 - foo        | regress_foreign_data_user | -       | -                        |                   | (testing '1', another '2') | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |                            | 
 -(3 rows)
 +(0 rows)
  
  DROP FOREIGN DATA WRAPPER foo;
 +ERROR:  foreign-data wrapper "foo" does not exist
  SET ROLE regress_test_role;
  CREATE FOREIGN DATA WRAPPER foo; -- ERROR
 -ERROR:  permission denied to create foreign-data wrapper "foo"
 -HINT:  Must be superuser to create a foreign-data wrapper.
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  RESET ROLE;
  CREATE FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator;
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
+                                                 List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         | Access privileges | FDW Options | Description 
+ ------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |             | useless
 - foo        | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |             | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |             | 
 -(3 rows)
 +(0 rows)
  
  -- ALTER FOREIGN DATA WRAPPER
  ALTER FOREIGN DATA WRAPPER foo;                             -- ERROR
@@@ -100,46 -95,53 +100,46 @@@ ERROR:  syntax error at or near ";
  LINE 1: ALTER FOREIGN DATA WRAPPER foo;
                                        ^
  ALTER FOREIGN DATA WRAPPER foo VALIDATOR bar;               -- ERROR
 -ERROR:  function bar(text[], oid) does not exist
 +ERROR:  foreign-data wrapper "foo" does not exist
  ALTER FOREIGN DATA WRAPPER foo NO VALIDATOR;
 +ERROR:  foreign-data wrapper "foo" does not exist
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
+                                                 List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         | Access privileges | FDW Options | Description 
+ ------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |             | useless
 - foo        | regress_foreign_data_user | -       | -                        |                   |             | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |             | 
 -(3 rows)
 +(0 rows)
  
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '1', b '2');
 +ERROR:  foreign-data wrapper "foo" does not exist
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (SET c '4');         -- ERROR
 -ERROR:  option "c" not found
 +ERROR:  foreign-data wrapper "foo" does not exist
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP c);            -- ERROR
 -ERROR:  option "c" not found
 +ERROR:  foreign-data wrapper "foo" does not exist
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD x '1', DROP x);
 +ERROR:  foreign-data wrapper "foo" does not exist
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
+                                                  List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         | Access privileges |  FDW Options   | Description 
+ ------------+---------------------------+---------+--------------------------+-------------------+----------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |                | useless
 - foo        | regress_foreign_data_user | -       | -                        |                   | (a '1', b '2') | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |                | 
 -(3 rows)
 +(0 rows)
  
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP a, SET b '3', ADD c '4');
 +ERROR:  foreign-data wrapper "foo" does not exist
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
+                                                  List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         | Access privileges |  FDW Options   | Description 
+ ------------+---------------------------+---------+--------------------------+-------------------+----------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |                | useless
 - foo        | regress_foreign_data_user | -       | -                        |                   | (b '3', c '4') | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |                | 
 -(3 rows)
 +(0 rows)
  
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '2');
 +ERROR:  foreign-data wrapper "foo" does not exist
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (b '4');             -- ERROR
 -ERROR:  option "b" provided more than once
 +ERROR:  foreign-data wrapper "foo" does not exist
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
+                                                      List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
+ ------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |                       | useless
 - foo        | regress_foreign_data_user | -       | -                        |                   | (b '3', c '4', a '2') | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |                       | 
 -(3 rows)
 +(0 rows)
  
  SET ROLE regress_test_role;
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5');         -- ERROR
@@@ -147,12 -149,14 +147,12 @@@ ERROR:  permission denied to alter fore
  HINT:  Must be superuser to alter a foreign-data wrapper.
  SET ROLE regress_test_role_super;
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5');
 +ERROR:  foreign-data wrapper "foo" does not exist
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
 -                                                        List of foreign-data wrappers
 -    Name    |           Owner           | Handler |        Validator         | Access privileges |         FDW Options          | Description 
 -------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |                              | useless
 - foo        | regress_foreign_data_user | -       | -                        |                   | (b '3', c '4', a '2', d '5') | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |                              | 
 -(3 rows)
++                                                     List of foreign-data wrappers
++    Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
 +(0 rows)
  
  ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role;  -- ERROR
  ERROR:  permission denied to change owner of foreign-data wrapper "foo"
@@@ -166,68 -169,80 +166,76 @@@ ERROR:  permission denied to alter fore
  HINT:  Must be superuser to alter a foreign-data wrapper.
  RESET ROLE;
  \dew+
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
 +                  List of foreign-data wrappers
 - dummy      | regress_foreign_data_user | -       | -                        |                   |                              | useless
 - foo        | regress_test_role_super   | -       | -                        |                   | (b '3', c '4', a '2', d '5') | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |                              | 
 -(3 rows)
+                                                         List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         | Access privileges |         FDW Options          | Description 
+ ------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |                              | useless
 - foo1       | regress_test_role_super   | -       | -                        |                   | (b '3', c '4', a '2', d '5') | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |                              | 
 -(3 rows)
++(0 rows)
+ ALTER FOREIGN DATA WRAPPER foo RENAME TO foo1;
+ \dew+
+                                                         List of foreign-data wrappers
+     Name    |           Owner           | Handler |        Validator         | Access privileges |         FDW Options          | Description 
+ ------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
 +(0 rows)
  
 -ALTER FOREIGN DATA WRAPPER foo1 RENAME TO foo;
  -- DROP FOREIGN DATA WRAPPER
  DROP FOREIGN DATA WRAPPER nonexistent;                      -- ERROR
  ERROR:  foreign-data wrapper "nonexistent" does not exist
  DROP FOREIGN DATA WRAPPER IF EXISTS nonexistent;
  NOTICE:  foreign-data wrapper "nonexistent" does not exist, skipping
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
 -                                                        List of foreign-data wrappers
 -    Name    |           Owner           | Handler |        Validator         | Access privileges |         FDW Options          | Description 
 -------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |                              | useless
 - foo        | regress_test_role_super   | -       | -                        |                   | (b '3', c '4', a '2', d '5') | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |                              | 
 -(3 rows)
++                                                     List of foreign-data wrappers
++    Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
 +(0 rows)
  
  DROP ROLE regress_test_role_super;                          -- ERROR
 -ERROR:  role "regress_test_role_super" cannot be dropped because some objects depend on it
 -DETAIL:  owner of foreign-data wrapper foo
  SET ROLE regress_test_role_super;
 -DROP FOREIGN DATA WRAPPER foo;
 +ERROR:  role "regress_test_role_super" does not exist
 +DROP FOREIGN DATA WRAPPER foo;                              -- ERROR
 +ERROR:  foreign-data wrapper "foo" does not exist
  RESET ROLE;
 +ALTER ROLE regress_test_role_super SUPERUSER;
 +ERROR:  role "regress_test_role_super" does not exist
 +DROP FOREIGN DATA WRAPPER foo;
 +ERROR:  foreign-data wrapper "foo" does not exist
  DROP ROLE regress_test_role_super;
 +ERROR:  role "regress_test_role_super" does not exist
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
 -                                                List of foreign-data wrappers
 -    Name    |           Owner           | Handler |        Validator         | Access privileges | FDW Options | Description 
 -------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |             | useless
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |             | 
 -(2 rows)
++                                                     List of foreign-data wrappers
++    Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
 +(0 rows)
  
  CREATE FOREIGN DATA WRAPPER foo;
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s1 FOREIGN DATA WRAPPER foo;
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  COMMENT ON SERVER s1 IS 'foreign server';
 +ERROR:  server "s1" does not exist
  CREATE USER MAPPING FOR current_user SERVER s1;
 +ERROR:  Postgres-XL does not support USER MAPPING yet
 +DETAIL:  The feature is not currently supported
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
 -                                                List of foreign-data wrappers
 -    Name    |           Owner           | Handler |        Validator         | Access privileges | FDW Options | Description 
 -------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |             | useless
 - foo        | regress_foreign_data_user | -       | -                        |                   |             | 
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |             | 
 -(3 rows)
++                                                     List of foreign-data wrappers
++    Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
 +(0 rows)
  
  \des+
 -                                                   List of foreign servers
 - Name |           Owner           | Foreign-data wrapper | Access privileges | Type | Version | FDW Options |  Description   
 -------+---------------------------+----------------------+-------------------+------+---------+-------------+----------------
 - s1   | regress_foreign_data_user | foo                  |                   |      |         |             | foreign server
 -(1 row)
 +                              List of foreign servers
 + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options 
 +------+-------+----------------------+-------------------+------+---------+---------
 +(0 rows)
  
  \deu+
 -              List of user mappings
 - Server |         User name         | FDW Options 
 ---------+---------------------------+-------------
 - s1     | regress_foreign_data_user | 
 -(1 row)
 +    List of user mappings
 + Server | User name | Options 
 +--------+-----------+---------
 +(0 rows)
  
  DROP FOREIGN DATA WRAPPER foo;                              -- ERROR
  ERROR:  cannot drop foreign-data wrapper foo because other objects depend on it
@@@ -242,17 -256,19 +250,17 @@@ RESET ROLE
  DROP FOREIGN DATA WRAPPER foo CASCADE;
  NOTICE:  drop cascades to 2 other objects
  DETAIL:  drop cascades to server s1
- drop cascades to user mapping for foreign_data_user on server s1
+ drop cascades to user mapping for regress_foreign_data_user on server s1
  \dew+
-                   List of foreign-data wrappers
-  Name | Owner | Handler | Validator | Access privileges | Options 
- ------+-------+---------+-----------+-------------------+---------
 -                                                List of foreign-data wrappers
 -    Name    |           Owner           | Handler |        Validator         | Access privileges | FDW Options | Description 
 -------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
 - dummy      | regress_foreign_data_user | -       | -                        |                   |             | useless
 - postgresql | regress_foreign_data_user | -       | postgresql_fdw_validator |                   |             | 
 -(2 rows)
++                                                     List of foreign-data wrappers
++    Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
 +(0 rows)
  
  \des+
 -                                       List of foreign servers
 - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW Options | Description 
 -------+-------+----------------------+-------------------+------+---------+-------------+-------------
 +                              List of foreign servers
 + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options 
 +------+-------+----------------------+-------------------+------+---------+---------
  (0 rows)
  
  \deu+
  
  -- exercise CREATE SERVER
  CREATE SERVER s1 FOREIGN DATA WRAPPER foo;                  -- ERROR
 -ERROR:  foreign-data wrapper "foo" does not exist
 -CREATE FOREIGN DATA WRAPPER foo OPTIONS ("test wrapper" 'true');
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
 +CREATE FOREIGN DATA WRAPPER foo OPTIONS (test_wrapper 'true');
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s1 FOREIGN DATA WRAPPER foo;
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s1 FOREIGN DATA WRAPPER foo;                  -- ERROR
 -ERROR:  server "s1" already exists
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s2 FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s3 TYPE 'oracle' FOREIGN DATA WRAPPER foo;
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s4 TYPE 'oracle' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s5 VERSION '15.0' FOREIGN DATA WRAPPER foo;
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s6 VERSION '16.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s7 TYPE 'oracle' VERSION '17.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (foo '1'); -- ERROR
 -ERROR:  invalid option "foo"
 -HINT:  Valid options in this context are: authtype, service, connect_timeout, dbname, host, hostaddr, port, tty, options, requiressl, sslmode, gsslib
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (host 'localhost', dbname 's8db');
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  \des+
-                               List of foreign servers
-  Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options 
- ------+-------+----------------------+-------------------+------+---------+---------
 -                                                             List of foreign servers
 - Name |           Owner           | Foreign-data wrapper | Access privileges |  Type  | Version |            FDW Options            | Description 
 -------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
 - s1   | regress_foreign_data_user | foo                  |                   |        |         |                                   | 
 - s2   | regress_foreign_data_user | foo                  |                   |        |         | (host 'a', dbname 'b')            | 
 - s3   | regress_foreign_data_user | foo                  |                   | oracle |         |                                   | 
 - s4   | regress_foreign_data_user | foo                  |                   | oracle |         | (host 'a', dbname 'b')            | 
 - s5   | regress_foreign_data_user | foo                  |                   |        | 15.0    |                                   | 
 - s6   | regress_foreign_data_user | foo                  |                   |        | 16.0    | (host 'a', dbname 'b')            | 
 - s7   | regress_foreign_data_user | foo                  |                   | oracle | 17.0    | (host 'a', dbname 'b')            | 
 - s8   | regress_foreign_data_user | postgresql           |                   |        |         | (host 'localhost', dbname 's8db') | 
 -(8 rows)
++                                                     List of foreign-data wrappers
++    Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
 +(0 rows)
  
  SET ROLE regress_test_role;
  CREATE SERVER t1 FOREIGN DATA WRAPPER foo;                 -- ERROR: no usage on FDW
 -ERROR:  permission denied for foreign-data wrapper foo
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  RESET ROLE;
  GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role;
 +ERROR:  foreign-data wrapper "foo" does not exist
  SET ROLE regress_test_role;
  CREATE SERVER t1 FOREIGN DATA WRAPPER foo;
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  RESET ROLE;
  \des+
 -                                                             List of foreign servers
 - Name |           Owner           | Foreign-data wrapper | Access privileges |  Type  | Version |            FDW Options            | Description 
 -------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
 - s1   | regress_foreign_data_user | foo                  |                   |        |         |                                   | 
 - s2   | regress_foreign_data_user | foo                  |                   |        |         | (host 'a', dbname 'b')            | 
 - s3   | regress_foreign_data_user | foo                  |                   | oracle |         |                                   | 
 - s4   | regress_foreign_data_user | foo                  |                   | oracle |         | (host 'a', dbname 'b')            | 
 - s5   | regress_foreign_data_user | foo                  |                   |        | 15.0    |                                   | 
 - s6   | regress_foreign_data_user | foo                  |                   |        | 16.0    | (host 'a', dbname 'b')            | 
 - s7   | regress_foreign_data_user | foo                  |                   | oracle | 17.0    | (host 'a', dbname 'b')            | 
 - s8   | regress_foreign_data_user | postgresql           |                   |        |         | (host 'localhost', dbname 's8db') | 
 - t1   | regress_test_role         | foo                  |                   |        |         |                                   | 
 -(9 rows)
++                                                     List of foreign-data wrappers
++    Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
++(0 rows)
 +                              List of foreign servers
 + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options 
 +------+-------+----------------------+-------------------+------+---------+---------
 +(0 rows)
  
  REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_test_role;
 +ERROR:  foreign-data wrapper "foo" does not exist
  GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect;
 +ERROR:  foreign-data wrapper "foo" does not exist
  SET ROLE regress_test_role;
  CREATE SERVER t2 FOREIGN DATA WRAPPER foo;                 -- ERROR
 -ERROR:  permission denied for foreign-data wrapper foo
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  RESET ROLE;
  GRANT regress_test_indirect TO regress_test_role;
  SET ROLE regress_test_role;
  CREATE SERVER t2 FOREIGN DATA WRAPPER foo;
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  \des+
 -                                                             List of foreign servers
 - Name |           Owner           | Foreign-data wrapper | Access privileges |  Type  | Version |            FDW Options            | Description 
 -------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
 - s1   | regress_foreign_data_user | foo                  |                   |        |         |                                   | 
 - s2   | regress_foreign_data_user | foo                  |                   |        |         | (host 'a', dbname 'b')            | 
 - s3   | regress_foreign_data_user | foo                  |                   | oracle |         |                                   | 
 - s4   | regress_foreign_data_user | foo                  |                   | oracle |         | (host 'a', dbname 'b')            | 
 - s5   | regress_foreign_data_user | foo                  |                   |        | 15.0    |                                   | 
 - s6   | regress_foreign_data_user | foo                  |                   |        | 16.0    | (host 'a', dbname 'b')            | 
 - s7   | regress_foreign_data_user | foo                  |                   | oracle | 17.0    | (host 'a', dbname 'b')            | 
 - s8   | regress_foreign_data_user | postgresql           |                   |        |         | (host 'localhost', dbname 's8db') | 
 - t1   | regress_test_role         | foo                  |                   |        |         |                                   | 
 - t2   | regress_test_role         | foo                  |                   |        |         |                                   | 
 -(10 rows)
++                                                     List of foreign-data wrappers
++    Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
++(0 rows)
 +                              List of foreign servers
 + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options 
 +------+-------+----------------------+-------------------+------+---------+---------
 +(0 rows)
  
  RESET ROLE;
  REVOKE regress_test_indirect FROM regress_test_role;
@@@ -352,20 -366,27 +368,24 @@@ LINE 1: ALTER SERVER s0
  ALTER SERVER s0 OPTIONS (a '1');                            -- ERROR
  ERROR:  server "s0" does not exist
  ALTER SERVER s1 VERSION '1.0' OPTIONS (servername 's1');
 +ERROR:  server "s1" does not exist
  ALTER SERVER s2 VERSION '1.1';
 -ALTER SERVER s3 OPTIONS ("tns name" 'orcl', port '1521');
 +ERROR:  server "s2" does not exist
 +ALTER SERVER s3 OPTIONS (tnsname 'orcl', port '1521');
 +ERROR:  server "s3" does not exist
  GRANT USAGE ON FOREIGN SERVER s1 TO regress_test_role;
 +ERROR:  server "s1" does not exist
  GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role2 WITH GRANT OPTION;
 +ERROR:  server "s6" does not exist
  \des+
 -                                                                               List of foreign servers
 - Name |           Owner           | Foreign-data wrapper |                   Access privileges                   |  Type  | Version |            FDW Options            | Description 
 -------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+-----------------------------------+-------------
 - s1   | regress_foreign_data_user | foo                  | regress_foreign_data_user=U/regress_foreign_data_user+|        | 1.0     | (servername 's1')                 | 
 -      |                           |                      | regress_test_role=U/regress_foreign_data_user         |        |         |                                   | 
 - s2   | regress_foreign_data_user | foo                  |                                                       |        | 1.1     | (host 'a', dbname 'b')            | 
 - s3   | regress_foreign_data_user | foo                  |                                                       | oracle |         | ("tns name" 'orcl', port '1521')  | 
 - s4   | regress_foreign_data_user | foo                  |                                                       | oracle |         | (host 'a', dbname 'b')            | 
 - s5   | regress_foreign_data_user | foo                  |                                                       |        | 15.0    |                                   | 
 - s6   | regress_foreign_data_user | foo                  | regress_foreign_data_user=U/regress_foreign_data_user+|        | 16.0    | (host 'a', dbname 'b')            | 
 -      |                           |                      | regress_test_role2=U*/regress_foreign_data_user       |        |         |                                   | 
 - s7   | regress_foreign_data_user | foo                  |                                                       | oracle | 17.0    | (host 'a', dbname 'b')            | 
 - s8   | regress_foreign_data_user | postgresql           |                                                       |        |         | (host 'localhost', dbname 's8db') | 
 - t1   | regress_test_role         | foo                  |                                                       |        |         |                                   | 
 - t2   | regress_test_role         | foo                  |                                                       |        |         |                                   | 
 -(10 rows)
++                                                     List of foreign-data wrappers
++    Name    |           Owner           | Handler |        Validator         | Access privileges |      FDW Options      | Description 
++------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
++(0 rows)
 +                              List of foreign servers
 + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | Options 
 +------+-------+----------------------+-------------------+------+---------+---------
 +(0 rows)
  
  SET ROLE regress_test_role;
  ALTER SERVER s1 VERSION '1.1';                              -- ERROR
@@@ -393,50 -412,51 +413,50 @@@ RESET ROLE
  GRANT regress_test_indirect TO regress_test_role;
  SET ROLE regress_test_role;
  ALTER SERVER s1 OWNER TO regress_test_indirect;
 +ERROR:  server "s1" does not exist
  RESET ROLE;
  GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect;
 +ERROR:  foreign-data wrapper "foo" does not exist
  SET ROLE regress_test_role;
  ALTER SERVER s1 OWNER TO regress_test_indirect;
 +ERROR:  server "s1" does not exist
  RESET ROLE;
  DROP ROLE regress_test_indirect;                            -- ERROR
 -ERROR:  role "regress_test_indirect" cannot be dropped because some objects depend on it
 -DETAIL:  owner of server s1
 -privileges for foreign-data wrapper foo
  \des+
-                                                                            List of foreign servers
-  Name |         Owner         | Foreign-data wrapper |               Access privileges               |  Type  | Version |             FDW Options              | Description 
- ------+-----------------------+----------------------+-----------------------------------------------+--------+---------+--------------------------------------+-------------
-  s1   | regress_test_indirect | foo                  | regress_test_indirect=U/regress_test_indirect |        | 1.1     | (servername 's1')                    | 
-  s2   | foreign_data_user     | foo                  |                                               |        | 1.1     | (host 'a', dbname 'b')               | 
-  s3   | foreign_data_user     | foo                  |                                               | oracle |         | ("tns name" 'orcl', port '1521')     | 
-  s4   | foreign_data_user     | foo                  |                                               | oracle |         | (host 'a', dbname 'b')               | 
-  s5   | foreign_data_user     | foo                  |                                               |        | 15.0    |                                      | 
-  s6   | foreign_data_user     | foo                  | foreign_data_user=U/foreign_data_user        +|        | 16.0    | (host 'a', dbname 'b')               | 
-       |                       |                      | regress_test_role2=U*/foreign_data_user       |        |         |                                      | 
-  s7   | foreign_data_user     | foo                  |                                               | oracle | 17.0    | (host 'a', dbname 'b')               | 
-  s8   | foreign_data_user     | postgresql           |                                               |        |         | (dbname 'db1', connect_timeout '30') | 
-  t1   | regress_test_role     | foo                  |                                               |        |         |                                      | 
-  t2   | regress_test_role     | foo                  |                                               |        |         |                                      | 
+                                                                                  List of foreign servers
+  Name |           Owner           | Foreign-data wrapper |                   Access privileges                   |  Type  | Version |             FDW Options              | Description 
+ ------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+-------------
+  s1   | regress_test_indirect     | foo                  | regress_test_indirect=U/regress_test_indirect         |        | 1.1     | (servername 's1')                    | 
+  s2   | regress_foreign_data_user | foo                  |                                                       |        | 1.1     | (host 'a', dbname 'b')               | 
+  s3   | regress_foreign_data_user | foo                  |                                                       | oracle |         | ("tns name" 'orcl', port '1521')     | 
+  s4   | regress_foreign_data_user | foo                  |                                                       | oracle |         | (host 'a', dbname 'b')               | 
+  s5   | regress_foreign_data_user | foo                  |                                                       |        | 15.0    |                                      | 
+  s6   | regress_foreign_data_user | foo                  | regress_foreign_data_user=U/regress_foreign_data_user+|        | 16.0    | (host 'a', dbname 'b')               | 
+       |                           |                      | regress_test_role2=U*/regress_foreign_data_user       |        |         |                                      | 
+  s7   | regress_foreign_data_user | foo                  |                                                       | oracle | 17.0    | (host 'a', dbname 'b')               | 
+  s8   | regress_foreign_data_user | postgresql           |                                                       |        |         | (dbname 'db1', connect_timeout '30') | 
+  t1   | regress_test_role         | foo                  |                                                       |        |         |                                      | 
+  t2   | regress_test_role         | foo                  |                                                       |        |         |                                      | 
  (10 rows)
  
  ALTER SERVER s8 RENAME to s8new;
  \des+
-                                                                            List of foreign servers
-  Name  |         Owner         | Foreign-data wrapper |               Access privileges               |  Type  | Version |             FDW Options              | Description 
- -------+-----------------------+----------------------+-----------------------------------------------+--------+---------+--------------------------------------+-------------
-  s1    | regress_test_indirect | foo                  | regress_test_indirect=U/regress_test_indirect |        | 1.1     | (servername 's1')                    | 
-  s2    | foreign_data_user     | foo                  |                                               |        | 1.1     | (host 'a', dbname 'b')               | 
-  s3    | foreign_data_user     | foo                  |                                               | oracle |         | ("tns name" 'orcl', port '1521')     | 
-  s4    | foreign_data_user     | foo                  |                                               | oracle |         | (host 'a', dbname 'b')               | 
-  s5    | foreign_data_user     | foo                  |                                               |        | 15.0    |                                      | 
-  s6    | foreign_data_user     | foo                  | foreign_data_user=U/foreign_data_user        +|        | 16.0    | (host 'a', dbname 'b')               | 
-        |                       |                      | regress_test_role2=U*/foreign_data_user       |        |         |                                      | 
-  s7    | foreign_data_user     | foo                  |                                               | oracle | 17.0    | (host 'a', dbname 'b')               | 
-  s8new | foreign_data_user     | postgresql           |                                               |        |         | (dbname 'db1', connect_timeout '30') | 
-  t1    | regress_test_role     | foo                  |                                               |        |         |                                      | 
-  t2    | regress_test_role     | foo                  |                                               |        |         |                                      | 
+                                                                                  List of foreign servers
+  Name  |           Owner           | Foreign-data wrapper |                   Access privileges                   |  Type  | Version |             FDW Options              | Description 
+ -------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+-------------
+  s1    | regress_test_indirect     | foo                  | regress_test_indirect=U/regress_test_indirect         |        | 1.1     | (servername 's1')                    | 
+  s2    | regress_foreign_data_user | foo                  |                                                       |        | 1.1     | (host 'a', dbname 'b')               | 
+  s3    | regress_foreign_data_user | foo                  |                                                       | oracle |         | ("tns name" 'orcl', port '1521')     | 
+  s4    | regress_foreign_data_user | foo                  |                                                       | oracle |         | (host 'a', dbname 'b')               | 
+  s5    | regress_foreign_data_user | foo                  |                                                       |        | 15.0    |                                      | 
+  s6    | regress_foreign_data_user | foo                  | regress_foreign_data_user=U/regress_foreign_data_user+|        | 16.0    | (host 'a', dbname 'b')               | 
+        |                           |                      | regress_test_role2=U*/regress_foreign_data_user       |        |         |                                      | 
+  s7    | regress_foreign_data_user | foo                  |                                                       | oracle | 17.0    | (host 'a', dbname 'b')               | 
+  s8new | regress_foreign_data_user | postgresql           |                                                       |        |         | (dbname 'db1', connect_timeout '30') | 
+  t1    | regress_test_role         | foo                  |                                                       |        |         |                                      | 
+  t2    | regress_test_role         | foo                  |                                                       |        |         |                                      | 
  (10 rows)
  
 -ALTER SERVER s8new RENAME to s8;
  -- DROP SERVER
  DROP SERVER nonexistent;                                    -- ERROR
  ERROR:  server "nonexistent" does not exist
@@@ -483,15 -526,22 +503,15 @@@ List of user mapping
  
  DROP SERVER s3;                                             -- ERROR
  ERROR:  cannot drop server s3 because other objects depend on it
- DETAIL:  user mapping for foreign_data_user on server s3 depends on server s3
+ DETAIL:  user mapping for regress_foreign_data_user on server s3 depends on server s3
  HINT:  Use DROP ... CASCADE to drop the dependent objects too.
  DROP SERVER s3 CASCADE;
- NOTICE:  drop cascades to user mapping for foreign_data_user on server s3
+ NOTICE:  drop cascades to user mapping for regress_foreign_data_user on server s3
  \des
 -                 List of foreign servers
 - Name |           Owner           | Foreign-data wrapper 
 -------+---------------------------+----------------------
 - s4   | regress_foreign_data_user | foo
 - s5   | regress_foreign_data_user | foo
 - s6   | regress_foreign_data_user | foo
 - s7   | regress_foreign_data_user | foo
 - s8   | regress_foreign_data_user | postgresql
 - t1   | regress_test_role         | foo
 - t2   | regress_test_role         | foo
 -(7 rows)
 +       List of foreign servers
 + Name | Owner | Foreign-data wrapper 
 +------+-------+----------------------
 +(0 rows)
  
  \deu
  List of user mappings
@@@ -592,27 -632,30 +612,26 @@@ NOTICE:  role "regress_test_missing_rol
  DROP USER MAPPING IF EXISTS FOR user SERVER ss4;
  NOTICE:  server does not exist, skipping
  DROP USER MAPPING IF EXISTS FOR public SERVER s7;
 -NOTICE:  user mapping "public" does not exist for the server, skipping
 +NOTICE:  server does not exist, skipping
  CREATE USER MAPPING FOR public SERVER s8;
 +ERROR:  Postgres-XL does not support USER MAPPING yet
 +DETAIL:  The feature is not currently supported
  SET ROLE regress_test_role;
  DROP USER MAPPING FOR public SERVER s8;                     -- ERROR
 -ERROR:  must be owner of foreign server s8
 +ERROR:  server "s8" does not exist
  RESET ROLE;
  DROP SERVER s7;
 +ERROR:  server "s7" does not exist
  \deu
 -       List of user mappings
 - Server |         User name         
 ---------+---------------------------
 - s4     | public
 - s4     | regress_foreign_data_user
 - s5     | regress_test_role
 - s6     | regress_test_role
 - s8     | public
 - s8     | regress_foreign_data_user
 - t1     | public
 - t1     | regress_test_role
 -(8 rows)
 -
 +List of user mappings
 + Server | User name 
 +--------+-----------
 +(0 rows)
  -- CREATE FOREIGN TABLE
  CREATE SCHEMA foreign_schema;
 -CREATE SERVER s0 FOREIGN DATA WRAPPER dummy;
 +CREATE SERVER sc FOREIGN DATA WRAPPER dummy;
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
  CREATE FOREIGN TABLE ft1 ();                                    -- ERROR
  ERROR:  syntax error at or near ";"
  LINE 1: CREATE FOREIGN TABLE ft1 ();
@@@ -852,42 -871,74 +871,41 @@@ ALTER FOREIGN TABLE IF EXISTS doesnt_ex
  NOTICE:  relation "doesnt_exist_ft1" does not exist, skipping
  -- Information schema
  SELECT * FROM information_schema.foreign_data_wrappers ORDER BY 1, 2;
 - foreign_data_wrapper_catalog | foreign_data_wrapper_name | authorization_identifier  | library_name | foreign_data_wrapper_language 
 -------------------------------+---------------------------+---------------------------+--------------+-------------------------------
 - regression                   | dummy                     | regress_foreign_data_user |              | c
 - regression                   | foo                       | regress_foreign_data_user |              | c
 - regression                   | postgresql                | regress_foreign_data_user |              | c
 -(3 rows)
 + foreign_data_wrapper_catalog | foreign_data_wrapper_name | authorization_identifier | library_name | foreign_data_wrapper_language 
 +------------------------------+---------------------------+--------------------------+--------------+-------------------------------
 +(0 rows)
  
  SELECT * FROM information_schema.foreign_data_wrapper_options ORDER BY 1, 2, 3;
 - foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name  | option_value 
 -------------------------------+---------------------------+--------------+--------------
 - regression                   | foo                       | test wrapper | true
 -(1 row)
 + foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value 
 +------------------------------+---------------------------+-------------+--------------
 +(0 rows)
  
  SELECT * FROM information_schema.foreign_servers ORDER BY 1, 2;
 - foreign_server_catalog | foreign_server_name | foreign_data_wrapper_catalog | foreign_data_wrapper_name | foreign_server_type | foreign_server_version | authorization_identifier  
 -------------------------+---------------------+------------------------------+---------------------------+---------------------+------------------------+---------------------------
 - regression             | s0                  | regression                   | dummy                     |                     |                        | regress_foreign_data_user
 - regression             | s4                  | regression                   | foo                       | oracle              |                        | regress_foreign_data_user
 - regression             | s5                  | regression                   | foo                       |                     | 15.0                   | regress_test_role
 - regression             | s6                  | regression                   | foo                       |                     | 16.0                   | regress_test_indirect
 - regression             | s8                  | regression                   | postgresql                |                     |                        | regress_foreign_data_user
 - regression             | t1                  | regression                   | foo                       |                     |                        | regress_test_indirect
 - regression             | t2                  | regression                   | foo                       |                     |                        | regress_test_role
 -(7 rows)
 -
 + foreign_server_catalog | foreign_server_name | foreign_data_wrapper_catalog | foreign_data_wrapper_name | foreign_server_type | foreign_server_version | authorization_identifier 
 +------------------------+---------------------+------------------------------+---------------------------+---------------------+------------------------+--------------------------
 +(0 rows)
  SELECT * FROM information_schema.foreign_server_options ORDER BY 1, 2, 3;
 - foreign_server_catalog | foreign_server_name |   option_name   | option_value 
 -------------------------+---------------------+-----------------+--------------
 - regression             | s4                  | dbname          | b
 - regression             | s4                  | host            | a
 - regression             | s6                  | dbname          | b
 - regression             | s6                  | host            | a
 - regression             | s8                  | connect_timeout | 30
 - regression             | s8                  | dbname          | db1
 -(6 rows)
 + foreign_server_catalog | foreign_server_name | option_name | option_value 
 +------------------------+---------------------+-------------+--------------
 +(0 rows)
  
  SELECT * FROM information_schema.user_mappings ORDER BY lower(authorization_identifier), 2, 3;
 - authorization_identifier  | foreign_server_catalog | foreign_server_name 
 ----------------------------+------------------------+---------------------
 - PUBLIC                    | regression             | s4
 - PUBLIC                    | regression             | s8
 - PUBLIC                    | regression             | t1
 - regress_foreign_data_user | regression             | s4
 - regress_foreign_data_user | regression             | s8
 - regress_test_role         | regression             | s5
 - regress_test_role         | regression             | s6
 - regress_test_role         | regression             | t1
 -(8 rows)
 + authorization_identifier | foreign_server_catalog | foreign_server_name 
 +--------------------------+------------------------+---------------------
 +(0 rows)
  
  SELECT * FROM information_schema.user_mapping_options ORDER BY lower(authorization_identifier), 2, 3, 4;
 - authorization_identifier  | foreign_server_catalog | foreign_server_name | option_name  | option_value 
 ----------------------------+------------------------+---------------------+--------------+--------------
 - PUBLIC                    | regression             | s4                  | this mapping | is public
 - PUBLIC                    | regression             | t1                  | modified     | 1
 - regress_foreign_data_user | regression             | s8                  | password     | public
 - regress_test_role         | regression             | s5                  | modified     | 1
 - regress_test_role         | regression             | s6                  | username     | test
 - regress_test_role         | regression             | t1                  | password     | boo
 - regress_test_role         | regression             | t1                  | username     | bob
 -(7 rows)
 + authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value 
 +--------------------------+------------------------+---------------------+-------------+--------------
 +(0 rows)
  
  SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
-         grantor        |        grantee        | object_catalog | object_schema | object_name |     object_type      | privilege_type | is_grantable 
- -----------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
-  foreign_data_user     | foreign_data_user     | regression     |               | foo         | FOREIGN DATA WRAPPER | USAGE          | YES
-  foreign_data_user     | regress_test_indirect | regression     |               | foo         | FOREIGN DATA WRAPPER | USAGE          | NO
-  regress_test_indirect | regress_test_indirect | regression     |               | s6          | FOREIGN SERVER       | USAGE          | YES
-  regress_test_indirect | regress_test_role2    | regression     |               | s6          | FOREIGN SERVER       | USAGE          | YES
+           grantor          |          grantee          | object_catalog | object_schema | object_name |     object_type      | privilege_type | is_grantable 
+ ---------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+--------------
+  regress_foreign_data_user | regress_foreign_data_user | regression     |               | foo         | FOREIGN DATA WRAPPER | USAGE          | YES
+  regress_foreign_data_user | regress_test_indirect     | regression     |               | foo         | FOREIGN DATA WRAPPER | USAGE          | NO
+  regress_test_indirect     | regress_test_indirect     | regression     |               | s6          | FOREIGN SERVER       | USAGE          | YES
+  regress_test_indirect     | regress_test_role2        | regression     |               | s6          | FOREIGN SERVER       | USAGE          | YES
  (4 rows)
  
  SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
@@@ -913,14 -968,19 +931,14 @@@ SET ROLE regress_test_role
  SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4;
   authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value 
  --------------------------+------------------------+---------------------+-------------+--------------
 - PUBLIC                   | regression             | t1                  | modified    | 1
 - regress_test_role        | regression             | s5                  | modified    | 1
 - regress_test_role        | regression             | s6                  | username    | test
 - regress_test_role        | regression             | t1                  | password    | boo
 - regress_test_role        | regression             | t1                  | username    | bob
 -(5 rows)
 +(0 rows)
  
  SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
-         grantor        |        grantee        | object_catalog | object_schema | object_name |     object_type      | privilege_type | is_grantable 
- -----------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
-  foreign_data_user     | regress_test_indirect | regression     |               | foo         | FOREIGN DATA WRAPPER | USAGE          | NO
-  regress_test_indirect | regress_test_indirect | regression     |               | s6          | FOREIGN SERVER       | USAGE          | YES
-  regress_test_indirect | regress_test_role2    | regression     |               | s6          | FOREIGN SERVER       | USAGE          | YES
+           grantor          |        grantee        | object_catalog | object_schema | object_name |     object_type      | privilege_type | is_grantable 
+ ---------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
+  regress_foreign_data_user | regress_test_indirect | regression     |               | foo         | FOREIGN DATA WRAPPER | USAGE          | NO
+  regress_test_indirect     | regress_test_indirect | regression     |               | s6          | FOREIGN SERVER       | USAGE          | YES
+  regress_test_indirect     | regress_test_role2    | regression     |               | s6          | FOREIGN SERVER       | USAGE          | YES
  (3 rows)
  
  SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
@@@ -1000,31 -1079,37 +1018,31 @@@ SELECT has_server_privilege
  (1 row)
  
  SELECT has_server_privilege(
 -    (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 's8', 'USAGE');
 - has_server_privilege 
 -----------------------
 - f
 -(1 row)
 -
 +    (SELECT oid FROM pg_catalog.pg_roles WHERE rolname='regress_test_role'), 's8', 'USAGE');
 +ERROR:  server "s8" does not exist
  SELECT has_server_privilege('s8', 'USAGE');
 - has_server_privilege 
 -----------------------
 - t
 -(1 row)
 -
 +ERROR:  server "s8" does not exist
  GRANT USAGE ON FOREIGN SERVER s8 TO regress_test_role;
 +ERROR:  server "s8" does not exist
  SELECT has_server_privilege('regress_test_role', 's8', 'USAGE');
 - has_server_privilege 
 -----------------------
 - t
 -(1 row)
 -
 +ERROR:  server "s8" does not exist
  REVOKE USAGE ON FOREIGN SERVER s8 FROM regress_test_role;
 +ERROR:  server "s8" does not exist
  GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role;
 +ERROR:  server "s4" does not exist
  DROP USER MAPPING FOR public SERVER s4;
 +ERROR:  server "s4" does not exist
  ALTER SERVER s6 OPTIONS (DROP host, DROP dbname);
 +ERROR:  server "s6" does not exist
  ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (DROP username);
 +ERROR:  server "s6" does not exist
  ALTER FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator;
 -WARNING:  changing the foreign-data wrapper validator can cause the options for dependent objects to become invalid
 +ERROR:  foreign-data wrapper "foo" does not exist
  -- Privileges
- SET ROLE unprivileged_role;
+ SET ROLE regress_unprivileged_role;
  CREATE FOREIGN DATA WRAPPER foobar;                             -- ERROR
 -ERROR:  permission denied to create foreign-data wrapper "foobar"
 -HINT:  Must be superuser to create a foreign-data wrapper.
 +ERROR:  Postgres-XL does not support FOREIGN DATA WRAPPER yet
 +DETAIL:  The feature is not currently supported
  ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true');         -- ERROR
  ERROR:  permission denied to alter foreign-data wrapper "foo"
  HINT:  Must be superuser to alter a foreign-data wrapper.
@@@ -1115,23 -1182,18 +1133,23 @@@ DETAIL:  The feature is not currently s
  DROP SERVER s9 CASCADE;
  NOTICE:  drop cascades to 2 other objects
  DETAIL:  drop cascades to user mapping for public on server s9
- drop cascades to user mapping for unprivileged_role on server s9
+ drop cascades to user mapping for regress_unprivileged_role on server s9
  RESET ROLE;
  CREATE SERVER s9 FOREIGN DATA WRAPPER foo;
 -GRANT USAGE ON FOREIGN SERVER s9 TO regress_unprivileged_role;
 -SET ROLE regress_unprivileged_role;
 +ERROR:  Postgres-XL does not support SERVER yet
 +DETAIL:  The feature is not currently supported
 +GRANT USAGE ON FOREIGN SERVER s9 TO unprivileged_role;
 +ERROR:  server "s9" does not exist
 +SET ROLE unprivileged_role;
  ALTER SERVER s9 VERSION '1.2';                                  -- ERROR
 -ERROR:  must be owner of foreign server s9
 +ERROR:  server "s9" does not exist
  GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role;          -- WARNING
 -WARNING:  no privileges were granted for "s9"
 +ERROR:  server "s9" does not exist
  CREATE USER MAPPING FOR current_user SERVER s9;
 +ERROR:  Postgres-XL does not support USER MAPPING yet
 +DETAIL:  The feature is not currently supported
  DROP SERVER s9 CASCADE;                                         -- ERROR
 -ERROR:  must be owner of foreign server s9
 +ERROR:  server "s9" does not exist
  RESET ROLE;
  -- Triggers
  CREATE FUNCTION dummy_trigger() RETURNS TRIGGER AS $$
@@@ -1689,43 -1758,32 +1707,38 @@@ ERROR:  role "regress_test_role" canno
  DETAIL:  privileges for server s4
  privileges for foreign-data wrapper foo
  owner of user mapping for regress_test_role on server s6
- owner of user mapping for regress_test_role on server s5
- owner of server s5
- owner of server t2
- DROP SERVER s5 CASCADE;
- NOTICE:  drop cascades to user mapping for regress_test_role on server s5
  DROP SERVER t1 CASCADE;
  NOTICE:  drop cascades to user mapping for public on server t1
 +DROP SERVER t2;
 +ERROR:  server "t2" does not exist
  DROP USER MAPPING FOR regress_test_role SERVER s6;
 +ERROR:  role "regress_test_role" does not exist
  -- This test causes some order dependent cascade detail output,
 --- so switch to terse mode for it.
 +-- so switch to terse mode for it. 
  \set VERBOSITY terse
  DROP FOREIGN DATA WRAPPER foo CASCADE;
 -NOTICE:  drop cascades to 5 other objects
 +ERROR:  foreign-data wrapper "foo" does not exist
  \set VERBOSITY default
  DROP SERVER s8 CASCADE;
  NOTICE:  drop cascades to 2 other objects
- DETAIL:  drop cascades to user mapping for foreign_data_user on server s8
+ DETAIL:  drop cascades to user mapping for regress_foreign_data_user on server s8
  drop cascades to user mapping for public on server s8
  DROP ROLE regress_test_indirect;
 +ERROR:  role "regress_test_indirect" does not exist
  DROP ROLE regress_test_role;
 -DROP ROLE regress_unprivileged_role;                        -- ERROR
 -ERROR:  role "regress_unprivileged_role" cannot be dropped because some objects depend on it
 -DETAIL:  privileges for foreign-data wrapper postgresql
 -REVOKE ALL ON FOREIGN DATA WRAPPER postgresql FROM regress_unprivileged_role;
 -DROP ROLE regress_unprivileged_role;
 +ERROR:  role "regress_test_role" does not exist
 +DROP ROLE unprivileged_role;                                -- ERROR
 +REVOKE ALL ON FOREIGN DATA WRAPPER postgresql FROM unprivileged_role;
 +ERROR:  foreign-data wrapper "postgresql" does not exist
 +DROP ROLE unprivileged_role;
 +ERROR:  role "unprivileged_role" does not exist
  DROP ROLE regress_test_role2;
  DROP FOREIGN DATA WRAPPER postgresql CASCADE;
 +ERROR:  foreign-data wrapper "postgresql" does not exist
  DROP FOREIGN DATA WRAPPER dummy CASCADE;
 -NOTICE:  drop cascades to server s0
 +ERROR:  foreign-data wrapper "dummy" does not exist
  \c
- DROP ROLE foreign_data_user;
+ DROP ROLE regress_foreign_data_user;
  -- At this point we should have no wrappers, no servers, and no mappings.
  SELECT fdwname, fdwhandler, fdwvalidator, fdwoptions FROM pg_foreign_data_wrapper;
   fdwname | fdwhandler | fdwvalidator | fdwoptions 
index 928d662d777857d7e23abe4bc66e1d0b12f151fd,044881af711ff50186a69715b16334b1832d4420..9a2340df2d7d8949c69e4ba8020dd75cf7d9df6c
@@@ -1356,12 -1354,30 +1356,34 @@@ create temp table cc (f1 int reference
  insert into pp values(12);
  insert into pp values(11);
  update pp set f1=f1+1;
 +ERROR:  could not plan this distributed update
 +DETAIL:  correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
  insert into cc values(13);
 +ERROR:  insert or update on table "cc" violates foreign key constraint "cc_f1_fkey"
 +DETAIL:  Key (f1)=(13) is not present in table "pp".
  update pp set f1=f1+1; -- fail
 -ERROR:  update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
 -DETAIL:  Key (f1)=(13) is still referenced from table "cc".
 +ERROR:  could not plan this distributed update
 +DETAIL:  correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
  drop table pp, cc;
+ --
+ -- Test interaction of foreign-key optimization with rules (bug #14219)
+ --
+ create temp table t1 (a integer primary key, b text);
+ create temp table t2 (a integer primary key, b integer references t1);
+ create rule r1 as on delete to t1 do delete from t2 where t2.b = old.a;
+ explain (costs off) delete from t1 where a = 1;
+                  QUERY PLAN                 
+ --------------------------------------------
+  Delete on t2
+    ->  Nested Loop
+          ->  Index Scan using t1_pkey on t1
+                Index Cond: (a = 1)
+          ->  Seq Scan on t2
+                Filter: (b = 1)
+  
+  Delete on t1
+    ->  Index Scan using t1_pkey on t1
+          Index Cond: (a = 1)
+ (10 rows)
+ delete from t1 where a = 1;
index f2ff91d898c12ffe35ec8578949d75863ee5c875,c7181b0397ec4688a416b40eb0d7233f8632c552..7574ec9fe0328d71d90432342736222854009845
@@@ -63,17 -61,16 +63,17 @@@ select p from gist_tbl where p <@ box(p
  -- Also test an index-only knn-search
  explain (costs off)
  select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5))
 -order by p <-> point(0.201, 0.201);
 -                       QUERY PLAN                       
 ---------------------------------------------------------
 - Index Only Scan using gist_tbl_point_index on gist_tbl
 -   Index Cond: (p <@ '(0.5,0.5),(0,0)'::box)
 -   Order By: (p <-> '(0.201,0.201)'::point)
 -(3 rows)
 +order by p <-> point(0.2, 0.2);
 +                          QUERY PLAN                          
 +--------------------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Index Only Scan using gist_tbl_point_index on gist_tbl
 +         Index Cond: (p <@ '(0.5,0.5),(0,0)'::box)
 +         Order By: (p <-> '(0.2,0.2)'::point)
 +(4 rows)
  
  select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5))
- order by p <-> point(0.2, 0.2);
+ order by p <-> point(0.201, 0.201);
        p      
  -------------
   (0.2,0.2)
  -- Check commuted case as well
  explain (costs off)
  select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5))
 -order by point(0.101, 0.101) <-> p;
 -                       QUERY PLAN                       
 ---------------------------------------------------------
 - Index Only Scan using gist_tbl_point_index on gist_tbl
 -   Index Cond: (p <@ '(0.5,0.5),(0,0)'::box)
 -   Order By: (p <-> '(0.101,0.101)'::point)
 -(3 rows)
 +order by point(0.1, 0.1) <-> p;
 +                          QUERY PLAN                          
 +--------------------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Index Only Scan using gist_tbl_point_index on gist_tbl
 +         Index Cond: (p <@ '(0.5,0.5),(0,0)'::box)
 +         Order By: (p <-> '(0.1,0.1)'::point)
 +(4 rows)
  
  select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5))
- order by point(0.1, 0.1) <-> p;
+ order by point(0.101, 0.101) <-> p;
        p      
  -------------
   (0.1,0.1)
index 84e0ba42591d3d24a7d1fb45f161d495dc252712,260ccd52c87b03831133d1679e28221c9f0aca84..a3ad1bd10385cdca9bc5cab9a024e5ed88d6bf81
@@@ -197,57 -578,134 +197,173 @@@ ERROR:  GROUPING SETS, ROLLUP or CUBE i
  select ten, sum(distinct four) from onek a
  group by grouping sets((ten,four),(ten))
  having exists (select 1 from onek b where sum(distinct a.four) = b.four);
 - ten | sum 
 ------+-----
 -   0 |   0
 -   0 |   2
 -   0 |   2
 -   1 |   1
 -   1 |   3
 -   2 |   0
 -   2 |   2
 -   2 |   2
 -   3 |   1
 -   3 |   3
 -   4 |   0
 -   4 |   2
 -   4 |   2
 -   5 |   1
 -   5 |   3
 -   6 |   0
 -   6 |   2
 -   6 |   2
 -   7 |   1
 -   7 |   3
 -   8 |   0
 -   8 |   2
 -   8 |   2
 -   9 |   1
 -   9 |   3
 -(25 rows)
 +ERROR:  GROUPING SETS, ROLLUP or CUBE is not yet supported
 +-- Tests around pushdown of HAVING clauses, partially testing against previous bugs
 +select a,count(*) from gstest2 group by rollup(a) order by a;
 +ERROR:  GROUPING SETS, ROLLUP or CUBE is not yet supported
 +select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
 +ERROR:  GROUPING SETS, ROLLUP or CUBE is not yet supported
 +explain (costs off)
 +  select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
 +ERROR:  GROUPING SETS, ROLLUP or CUBE is not yet supported
 +select v.c, (select count(*) from gstest2 group by () having v.c)
 +  from (values (false),(true)) v(c) order by v.c;
 + c | count 
 +---+-------
 + f |      
 + t |     9
 +(2 rows)
 +
 +explain (costs off)
 +  select v.c, (select count(*) from gstest2 group by () having v.c)
 +    from (values (false),(true)) v(c) order by v.c;
 +                               QUERY PLAN                                
 +-------------------------------------------------------------------------
 + Sort
 +   Sort Key: "*VALUES*".column1
 +   ->  Values Scan on "*VALUES*"
 +         SubPlan 1
 +           ->  Aggregate
 +                 Group Key: ()
 +                 Filter: "*VALUES*".column1
 +                 ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +                       ->  Aggregate
 +                             ->  Seq Scan on gstest2
 +(10 rows)
 +
 +-- HAVING with GROUPING queries
 +select ten, grouping(ten) from onek
 +group by grouping sets(ten) having grouping(ten) >= 0
 +order by 2,1;
 +ERROR:  GROUPING SETS, ROLLUP or CUBE is not yet supported
 +select ten, grouping(ten) from onek
 +group by grouping sets(ten, four) having grouping(ten) > 0
 +order by 2,1;
 +ERROR:  GROUPING SETS, ROLLUP or CUBE is not yet supported
 +select ten, grouping(ten) from onek
 +group by rollup(ten) having grouping(ten) > 0
 +order by 2,1;
 +ERROR:  GROUPING SETS, ROLLUP or CUBE is not yet supported
 +select ten, grouping(ten) from onek
 +group by cube(ten) having grouping(ten) > 0
 +order by 2,1;
 +ERROR:  GROUPING SETS, ROLLUP or CUBE is not yet supported
++select ten, grouping(ten) from onek
++group by (ten) having grouping(ten) >= 0
++order by 2,1;
++ ten | grouping 
++-----+----------
++   0 |        0
++   1 |        0
++   2 |        0
++   3 |        0
++   4 |        0
++   5 |        0
++   6 |        0
++   7 |        0
++   8 |        0
++   9 |        0
++(10 rows)
+ -- Tests around pushdown of HAVING clauses, partially testing against previous bugs
+ select a,count(*) from gstest2 group by rollup(a) order by a;
+  a | count 
+ ---+-------
+  1 |     8
+  2 |     1
+    |     9
+ (3 rows)
+ select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
+  a | count 
+ ---+-------
+  2 |     1
+    |     9
+ (2 rows)
+ explain (costs off)
+   select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
+             QUERY PLAN            
+ ----------------------------------
+  GroupAggregate
+    Group Key: a
+    Group Key: ()
+    Filter: (a IS DISTINCT FROM 1)
+    ->  Sort
+          Sort Key: a
+          ->  Seq Scan on gstest2
+ (7 rows)
+ select v.c, (select count(*) from gstest2 group by () having v.c)
+   from (values (false),(true)) v(c) order by v.c;
+  c | count 
+ ---+-------
+  f |      
+  t |     9
+ (2 rows)
+ explain (costs off)
+   select v.c, (select count(*) from gstest2 group by () having v.c)
+     from (values (false),(true)) v(c) order by v.c;
+                         QUERY PLAN                         
+ -----------------------------------------------------------
+  Sort
+    Sort Key: "*VALUES*".column1
+    ->  Values Scan on "*VALUES*"
+          SubPlan 1
+            ->  Aggregate
+                  Group Key: ()
+                  Filter: "*VALUES*".column1
+                  ->  Result
+                        One-Time Filter: "*VALUES*".column1
+                        ->  Seq Scan on gstest2
+ (10 rows)
+ -- HAVING with GROUPING queries
+ select ten, grouping(ten) from onek
+ group by grouping sets(ten) having grouping(ten) >= 0
+ order by 2,1;
+  ten | grouping 
+ -----+----------
+    0 |        0
+    1 |        0
+    2 |        0
+    3 |        0
+    4 |        0
+    5 |        0
+    6 |        0
+    7 |        0
+    8 |        0
+    9 |        0
+ (10 rows)
+ select ten, grouping(ten) from onek
+ group by grouping sets(ten, four) having grouping(ten) > 0
+ order by 2,1;
+  ten | grouping 
+ -----+----------
+      |        1
+      |        1
+      |        1
+      |        1
+ (4 rows)
+ select ten, grouping(ten) from onek
+ group by rollup(ten) having grouping(ten) > 0
+ order by 2,1;
+  ten | grouping 
+ -----+----------
+      |        1
+ (1 row)
+ select ten, grouping(ten) from onek
+ group by cube(ten) having grouping(ten) > 0
+ order by 2,1;
+  ten | grouping 
+ -----+----------
+      |        1
+ (1 row)
  select ten, grouping(ten) from onek
  group by (ten) having grouping(ten) >= 0
  order by 2,1;
index b8ea9e552f5d6e9021e9999e728781d07f1ad646,d9bbae097b73fb64448bd86cb42ba467ae025234..49dcb1814bb09b3d605a143600f3eb57822fbc7f
@@@ -2925,106 -2967,89 +3037,177 @@@ explain (costs off
  select * from
    tenk1, int8_tbl a, int8_tbl b
  where thousand = a.q1 and tenthous = b.q1 and a.q2 = 1 and b.q2 = 2;
 -                             QUERY PLAN                              
 ----------------------------------------------------------------------
 +                       QUERY PLAN                        
 +---------------------------------------------------------
 + Remote Fast Query Execution
 +   Node/s: datanode_1, datanode_2
 +   ->  Merge Join
 +         Merge Cond: (tenk1.thousand = a.q1)
 +         ->  Sort
 +               Sort Key: tenk1.thousand
 +               ->  Merge Join
 +                     Merge Cond: (tenk1.tenthous = b.q1)
 +                     ->  Sort
 +                           Sort Key: tenk1.tenthous
 +                           ->  Seq Scan on tenk1
 +                     ->  Sort
 +                           Sort Key: b.q1
 +                           ->  Seq Scan on int8_tbl b
 +                                 Filter: (q2 = 2)
 +         ->  Sort
 +               Sort Key: a.q1
 +               ->  Seq Scan on int8_tbl a
 +                     Filter: (q2 = 1)
 +(19 rows)
 +
 +--
 +-- test a corner case in which we shouldn't apply the star-schema optimization
 +--
 +explain (costs off, nodes off)
 +select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
 +  tenk1 t1
 +  inner join int4_tbl i1
 +    left join (select v1.x2, v2.y1, 11 AS d1
 +               from (values(1,0)) v1(x1,x2)
 +               left join (values(3,1)) v2(y1,y2)
 +               on v1.x1 = v2.y2) subq1
 +    on (i1.f1 = subq1.x2)
 +  on (t1.unique2 = subq1.d1)
 +  left join tenk1 t2
 +  on (subq1.y1 = t2.unique1)
 +where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
 +                                    QUERY PLAN                                     
 +-----------------------------------------------------------------------------------
   Nested Loop
 -   ->  Seq Scan on int8_tbl b
 -         Filter: (q2 = 2)
 +   Join Filter: (t1.stringu1 > t2.stringu2)
     ->  Nested Loop
 -         ->  Seq Scan on int8_tbl a
 -               Filter: (q2 = 1)
 -         ->  Index Scan using tenk1_thous_tenthous on tenk1
 -               Index Cond: ((thousand = a.q1) AND (tenthous = b.q1))
 -(8 rows)
 +         Join Filter: ((0) = i1.f1)
 +         ->  Nested Loop
 +               ->  Nested Loop
 +                     Join Filter: ((1) = (1))
 +                     ->  Result
 +                     ->  Result
 +               ->  Materialize
 +                     ->  Remote Subquery Scan on all
 +                           ->  Index Scan using tenk1_unique2 on tenk1 t1
 +                                 Index Cond: ((unique2 = (11)) AND (unique2 < 42))
 +         ->  Materialize
 +               ->  Remote Subquery Scan on all
 +                     ->  Seq Scan on int4_tbl i1
 +   ->  Materialize
 +         ->  Remote Subquery Scan on all
 +               ->  Index Scan using tenk1_unique1 on tenk1 t2
 +                     Index Cond: (unique1 = (3))
 +(20 rows)
 +
 +select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
 +  tenk1 t1
 +  inner join int4_tbl i1
 +    left join (select v1.x2, v2.y1, 11 AS d1
 +               from (values(1,0)) v1(x1,x2)
 +               left join (values(3,1)) v2(y1,y2)
 +               on v1.x1 = v2.y2) subq1
 +    on (i1.f1 = subq1.x2)
 +  on (t1.unique2 = subq1.d1)
 +  left join tenk1 t2
 +  on (subq1.y1 = t2.unique1)
 +where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
 + unique2 | stringu1 | unique1 | stringu2 
 +---------+----------+---------+----------
 +      11 | WFAAAA   |       3 | LKIAAA
 +(1 row)
 +
 +-- variant that isn't quite a star-schema case
 +select ss1.d1 from
 +  tenk1 as t1
 +  inner join tenk1 as t2
 +  on t1.tenthous = t2.ten
 +  inner join
 +    int8_tbl as i8
 +    left join int4_tbl as i4
 +      inner join (select 64::information_schema.cardinal_number as d1
 +                  from tenk1 t3,
 +                       lateral (select abs(t3.unique1) + random()) ss0(x)
 +                  where t3.fivethous < 0) as ss1
 +      on i4.f1 = ss1.d1
 +    on i8.q1 = i4.f1
 +  on t1.tenthous = ss1.d1
 +where t1.unique1 < i4.f1;
 + d1 
 +----
 +(0 rows)
  
+ --
+ -- test a corner case in which we shouldn't apply the star-schema optimization
+ --
+ explain (costs off)
+ select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+   tenk1 t1
+   inner join int4_tbl i1
+     left join (select v1.x2, v2.y1, 11 AS d1
+                from (values(1,0)) v1(x1,x2)
+                left join (values(3,1)) v2(y1,y2)
+                on v1.x1 = v2.y2) subq1
+     on (i1.f1 = subq1.x2)
+   on (t1.unique2 = subq1.d1)
+   left join tenk1 t2
+   on (subq1.y1 = t2.unique1)
+ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+                               QUERY PLAN                               
+ -----------------------------------------------------------------------
+  Nested Loop
+    Join Filter: (t1.stringu1 > t2.stringu2)
+    ->  Nested Loop
+          Join Filter: ((0) = i1.f1)
+          ->  Nested Loop
+                ->  Nested Loop
+                      Join Filter: ((1) = (1))
+                      ->  Result
+                      ->  Result
+                ->  Index Scan using tenk1_unique2 on tenk1 t1
+                      Index Cond: ((unique2 = (11)) AND (unique2 < 42))
+          ->  Seq Scan on int4_tbl i1
+    ->  Index Scan using tenk1_unique1 on tenk1 t2
+          Index Cond: (unique1 = (3))
+ (14 rows)
+ select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+   tenk1 t1
+   inner join int4_tbl i1
+     left join (select v1.x2, v2.y1, 11 AS d1
+                from (values(1,0)) v1(x1,x2)
+                left join (values(3,1)) v2(y1,y2)
+                on v1.x1 = v2.y2) subq1
+     on (i1.f1 = subq1.x2)
+   on (t1.unique2 = subq1.d1)
+   left join tenk1 t2
+   on (subq1.y1 = t2.unique1)
+ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+  unique2 | stringu1 | unique1 | stringu2 
+ ---------+----------+---------+----------
+       11 | WFAAAA   |       3 | LKIAAA
+ (1 row)
+ -- variant that isn't quite a star-schema case
+ select ss1.d1 from
+   tenk1 as t1
+   inner join tenk1 as t2
+   on t1.tenthous = t2.ten
+   inner join
+     int8_tbl as i8
+     left join int4_tbl as i4
+       inner join (select 64::information_schema.cardinal_number as d1
+                   from tenk1 t3,
+                        lateral (select abs(t3.unique1) + random()) ss0(x)
+                   where t3.fivethous < 0) as ss1
+       on i4.f1 = ss1.d1
+     on i8.q1 = i4.f1
+   on t1.tenthous = ss1.d1
+ where t1.unique1 < i4.f1;
+  d1 
+ ----
+ (0 rows)
  --
  -- test extraction of restriction OR clauses from join OR clause
  -- (we used to only do this for indexable clauses)
@@@ -3610,14 -4000,15 +4263,16 @@@ select d.* from d left join (select * f
  explain (costs off)
  select d.* from d left join (select distinct * from b) s
    on d.a = s.id and d.b = s.c_id;
 -  QUERY PLAN   
 ----------------
 - Seq Scan on d
 -(1 row)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Seq Scan on d
 +(2 rows)
  
  -- join removal is not possible when the GROUP BY contains a column that is
- -- not in the join condition
+ -- not in the join condition.  (Note: as of 9.6, we notice that b.id is a
+ -- primary key and so drop b.c_id from the GROUP BY of the resulting plan;
+ -- but this happens too late for join removal in the outer plan level.)
  explain (costs off)
  select d.* from d left join (select * from b group by b.id, b.c_id) s
    on d.a = s.id;
@@@ -3794,11 -4175,68 +4449,68 @@@ SELECT * FRO
  ---+------------------+-------------------+------------------
   1 |              123 |               456 |              123
   1 |              123 |  4567890123456789 |              123
 + 1 | 4567890123456789 | -4567890123456789 | 4567890123456789
   1 | 4567890123456789 |               123 |               42
   1 | 4567890123456789 |  4567890123456789 | 4567890123456789
 - 1 | 4567890123456789 | -4567890123456789 | 4567890123456789
  (5 rows)
  
+ rollback;
+ -- another join removal bug: we must clean up correctly when removing a PHV
+ begin;
+ create temp table uniquetbl (f1 text unique);
+ explain (costs off)
+ select t1.* from
+   uniquetbl as t1
+   left join (select *, '***'::text as d1 from uniquetbl) t2
+   on t1.f1 = t2.f1
+   left join uniquetbl t3
+   on t2.d1 = t3.f1;
+         QUERY PLAN        
+ --------------------------
+  Seq Scan on uniquetbl t1
+ (1 row)
+ explain (costs off)
+ select t0.*
+ from
+  text_tbl t0
+  left join
+    (select case t1.ten when 0 then 'doh!'::text else null::text end as case1,
+            t1.stringu2
+      from tenk1 t1
+      join int4_tbl i4 ON i4.f1 = t1.unique2
+      left join uniquetbl u1 ON u1.f1 = t1.string4) ss
+   on t0.f1 = ss.case1
+ where ss.stringu2 !~* ss.case1;
+                                          QUERY PLAN                                         
+ --------------------------------------------------------------------------------------------
+  Nested Loop
+    Join Filter: (CASE t1.ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END = t0.f1)
+    ->  Nested Loop
+          ->  Seq Scan on int4_tbl i4
+          ->  Index Scan using tenk1_unique2 on tenk1 t1
+                Index Cond: (unique2 = i4.f1)
+                Filter: (stringu2 !~* CASE ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END)
+    ->  Materialize
+          ->  Seq Scan on text_tbl t0
+ (9 rows)
+ select t0.*
+ from
+  text_tbl t0
+  left join
+    (select case t1.ten when 0 then 'doh!'::text else null::text end as case1,
+            t1.stringu2
+      from tenk1 t1
+      join int4_tbl i4 ON i4.f1 = t1.unique2
+      left join uniquetbl u1 ON u1.f1 = t1.string4) ss
+   on t0.f1 = ss.case1
+ where ss.stringu2 !~* ss.case1;
+   f1  
+ ------
+  doh!
+ (1 row)
  rollback;
  -- bug #8444: we've historically allowed duplicate aliases within aliased JOINs
  select * from
index 7c655ca8b2a792d79a03678d5d43364a50ed59da,e7d0ad1d86d1b2ccfa28a55614300ae3e1d8532d..ddcf57a5b1e1f64808fd9743eac364ddb5d47f5d
@@@ -18,19 -18,16 +18,18 @@@ SELECT * FROM mvtest_tv ORDER BY type
  
  -- create a materialized view with no data, and confirm correct behavior
  EXPLAIN (costs off)
 -  CREATE MATERIALIZED VIEW mvtest_tm AS SELECT type, sum(amt) AS totamt FROM mvtest_t GROUP BY type WITH NO DATA;
 -         QUERY PLAN         
 -----------------------------
 +  CREATE MATERIALIZED VIEW tm AS SELECT type, sum(amt) AS totamt FROM t GROUP BY type WITH NO DATA;
 +                        QUERY PLAN                         
 +-----------------------------------------------------------
   HashAggregate
     Group Key: type
 -   ->  Seq Scan on mvtest_t
 -(3 rows)
 -
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  HashAggregate
 +               Group Key: type
 +               ->  Seq Scan on t
 +(6 rows)
- CREATE MATERIALIZED VIEW tm AS SELECT type, sum(amt) AS totamt FROM t GROUP BY type WITH NO DATA;
- SELECT relispopulated FROM pg_class WHERE oid = 'tm'::regclass;
+ CREATE MATERIALIZED VIEW mvtest_tm AS SELECT type, sum(amt) AS totamt FROM mvtest_t GROUP BY type WITH NO DATA;
+ SELECT relispopulated FROM pg_class WHERE oid = 'mvtest_tm'::regclass;
   relispopulated 
  ----------------
   f
@@@ -57,21 -54,18 +56,21 @@@ SELECT * FROM mvtest_tm
  
  -- create various views
  EXPLAIN (costs off)
 -  CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type;
 -            QUERY PLAN            
 -----------------------------------
 +  CREATE MATERIALIZED VIEW tvm AS SELECT * FROM tv ORDER BY type;
 +                           QUERY PLAN                            
 +-----------------------------------------------------------------
   Sort
-    Sort Key: t.type
+    Sort Key: mvtest_t.type
     ->  HashAggregate
 -         Group Key: mvtest_t.type
 -         ->  Seq Scan on mvtest_t
 -(5 rows)
 +         Group Key: t.type
 +         ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +               ->  HashAggregate
 +                     Group Key: t.type
 +                     ->  Seq Scan on t
 +(8 rows)
  
- CREATE MATERIALIZED VIEW tvm AS SELECT * FROM tv ORDER BY type;
- SELECT * FROM tvm;
+ CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type;
+ SELECT * FROM mvtest_tvm;
   type | totamt 
  ------+--------
   x    |      5
   z    |     11
  (3 rows)
  
- CREATE MATERIALIZED VIEW tmm AS SELECT sum(totamt) AS grandtot FROM tm;
- CREATE MATERIALIZED VIEW tvmm AS SELECT sum(totamt) AS grandtot FROM tvm;
- CREATE UNIQUE INDEX tvmm_expr ON tvmm ((grandtot > 0));
- CREATE UNIQUE INDEX tvmm_pred ON tvmm (grandtot) WHERE grandtot < 0;
- CREATE VIEW tvv AS SELECT sum(totamt) AS grandtot FROM tv;
+ CREATE MATERIALIZED VIEW mvtest_tmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tm;
+ CREATE MATERIALIZED VIEW mvtest_tvmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tvm;
+ CREATE UNIQUE INDEX mvtest_tvmm_expr ON mvtest_tvmm ((grandtot > 0));
+ CREATE UNIQUE INDEX mvtest_tvmm_pred ON mvtest_tvmm (grandtot) WHERE grandtot < 0;
+ CREATE VIEW mvtest_tvv AS SELECT sum(totamt) AS grandtot FROM mvtest_tv;
  EXPLAIN (costs off)
 -  CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv;
 -            QUERY PLAN            
 -----------------------------------
 +  CREATE MATERIALIZED VIEW tvvm AS SELECT * FROM tvv;
 +                           QUERY PLAN                            
 +-----------------------------------------------------------------
   Aggregate
     ->  HashAggregate
 -         Group Key: mvtest_t.type
 -         ->  Seq Scan on mvtest_t
 -(4 rows)
 +         Group Key: t.type
 +         ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +               ->  HashAggregate
 +                     Group Key: t.type
 +                     ->  Seq Scan on t
 +(7 rows)
  
- CREATE MATERIALIZED VIEW tvvm AS SELECT * FROM tvv;
- CREATE VIEW tvvmv AS SELECT * FROM tvvm;
- CREATE MATERIALIZED VIEW bb AS SELECT * FROM tvvmv;
- CREATE INDEX aa ON bb (grandtot);
+ CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv;
+ CREATE VIEW mvtest_tvvmv AS SELECT * FROM mvtest_tvvm;
+ CREATE MATERIALIZED VIEW mvtest_bb AS SELECT * FROM mvtest_tvvmv;
+ CREATE INDEX mvtest_aa ON mvtest_bb (grandtot);
  -- check that plans seem reasonable
- \d+ tvm
-                     Materialized view "public.tvm"
+ \d+ mvtest_tvm
+                 Materialized view "public.mvtest_tvm"
   Column |  Type   | Modifiers | Storage  | Stats target | Description 
  --------+---------+-----------+----------+--------------+-------------
   type   | text    |           | extended |              | 
@@@ -200,10 -192,10 +199,10 @@@ SELECT * FROM tm ORDER BY type
  ------+--------
   x    |      5
   y    |     12
 - z    |     24
 + z    |     11
  (3 rows)
  
- SELECT * FROM tvm ORDER BY type;
+ SELECT * FROM mvtest_tvm ORDER BY type;
   type | totamt 
  ------+--------
   x    |      5
@@@ -273,19 -265,19 +272,19 @@@ EXPLAIN (costs off
  (1 row)
  
  EXPLAIN (costs off)
-   SELECT * FROM tvvm;
-     QUERY PLAN    
- ------------------
-  Seq Scan on tvvm
+   SELECT * FROM mvtest_tvvm;
+        QUERY PLAN        
+ -------------------------
+  Seq Scan on mvtest_tvvm
  (1 row)
  
- SELECT * FROM tmm;
+ SELECT * FROM mvtest_tmm;
   grandtot 
  ----------
 -       41
 +       28
  (1 row)
  
- SELECT * FROM tvmm;
+ SELECT * FROM mvtest_tvmm;
   grandtot 
  ----------
         41
@@@ -410,38 -400,42 +409,38 @@@ SELECT * FROM hogeview WHERE i < 10
  ---
  (0 rows)
  
- DROP TABLE hoge CASCADE;
- NOTICE:  drop cascades to materialized view hogeview
+ DROP TABLE mvtest_huge CASCADE;
+ NOTICE:  drop cascades to materialized view mvtest_hugeview
  -- test that duplicate values on unique index prevent refresh
- CREATE TABLE foo(a, b) AS VALUES(1, 10);
- CREATE MATERIALIZED VIEW mv AS SELECT * FROM foo;
- CREATE UNIQUE INDEX ON mv(a);
- INSERT INTO foo SELECT * FROM foo;
- REFRESH MATERIALIZED VIEW mv;
- ERROR:  could not create unique index "mv_a_idx"
+ CREATE TABLE mvtest_foo(a, b) AS VALUES(1, 10);
+ CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo;
+ CREATE UNIQUE INDEX ON mvtest_mv(a);
+ INSERT INTO mvtest_foo SELECT * FROM mvtest_foo;
+ REFRESH MATERIALIZED VIEW mvtest_mv;
+ ERROR:  could not create unique index "mvtest_mv_a_idx"
  DETAIL:  Key (a)=(1) is duplicated.
 -REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv;
 -ERROR:  new data for materialized view "mvtest_mv" contains duplicate rows without any null columns
 -DETAIL:  Row: (1,10)
 -DROP TABLE mvtest_foo CASCADE;
 -NOTICE:  drop cascades to materialized view mvtest_mv
 +DROP TABLE foo CASCADE;
 +NOTICE:  drop cascades to materialized view mv
  -- make sure that all columns covered by unique indexes works
 -CREATE TABLE mvtest_foo(a, b, c) AS VALUES(1, 2, 3);
 -CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo;
 -CREATE UNIQUE INDEX ON mvtest_mv (a);
 -CREATE UNIQUE INDEX ON mvtest_mv (b);
 -CREATE UNIQUE INDEX on mvtest_mv (c);
 -INSERT INTO mvtest_foo VALUES(2, 3, 4);
 -INSERT INTO mvtest_foo VALUES(3, 4, 5);
 -REFRESH MATERIALIZED VIEW mvtest_mv;
 -REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv;
 -DROP TABLE mvtest_foo CASCADE;
 -NOTICE:  drop cascades to materialized view mvtest_mv
 +CREATE TABLE foo(a, b, c) AS VALUES(1, 2, 3);
 +CREATE MATERIALIZED VIEW mv AS SELECT * FROM foo;
 +CREATE UNIQUE INDEX ON mv (a);
 +CREATE UNIQUE INDEX ON mv (b);
 +CREATE UNIQUE INDEX on mv (c);
 +INSERT INTO foo VALUES(2, 3, 4);
 +INSERT INTO foo VALUES(3, 4, 5);
 +REFRESH MATERIALIZED VIEW mv;
 +DROP TABLE foo CASCADE;
 +NOTICE:  drop cascades to materialized view mv
  -- allow subquery to reference unpopulated matview if WITH NO DATA is specified
- CREATE MATERIALIZED VIEW mv1 AS SELECT 1 AS col1 WITH NO DATA;
- CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM mv1
-   WHERE col1 = (SELECT LEAST(col1) FROM mv1) WITH NO DATA;
- DROP MATERIALIZED VIEW mv1 CASCADE;
- NOTICE:  drop cascades to materialized view mv2
+ CREATE MATERIALIZED VIEW mvtest_mv1 AS SELECT 1 AS col1 WITH NO DATA;
+ CREATE MATERIALIZED VIEW mvtest_mv2 AS SELECT * FROM mvtest_mv1
+   WHERE col1 = (SELECT LEAST(col1) FROM mvtest_mv1) WITH NO DATA;
+ DROP MATERIALIZED VIEW mvtest_mv1 CASCADE;
+ NOTICE:  drop cascades to materialized view mvtest_mv2
  -- make sure that types with unusual equality tests work
- CREATE TABLE boxes (id serial primary key, b box);
- INSERT INTO boxes (b) VALUES
+ CREATE TABLE mvtest_boxes (id serial primary key, b box);
+ INSERT INTO mvtest_boxes (b) VALUES
    ('(32,32),(31,31)'),
    ('(2.0000004,2.0000004),(1,1)'),
    ('(1.9999996,1.9999996),(1,1)');
@@@ -456,75 -451,134 +455,104 @@@ SELECT * FROM boxmv ORDER BY id
    3 | (1.9999996,1.9999996),(1,1)
  (3 rows)
  
- DROP TABLE boxes CASCADE;
- NOTICE:  drop cascades to materialized view boxmv
+ DROP TABLE mvtest_boxes CASCADE;
+ NOTICE:  drop cascades to materialized view mvtest_boxmv
  -- make sure that column names are handled correctly
 -CREATE TABLE mvtest_v (i int, j int);
 -CREATE MATERIALIZED VIEW mvtest_mv_v (ii, jj, kk) AS SELECT i, j FROM mvtest_v; -- error
 -ERROR:  too many column names were specified
 -CREATE MATERIALIZED VIEW mvtest_mv_v (ii, jj) AS SELECT i, j FROM mvtest_v; -- ok
 -CREATE MATERIALIZED VIEW mvtest_mv_v_2 (ii) AS SELECT i, j FROM mvtest_v; -- ok
 -CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj, kk) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- error
 -ERROR:  too many column names were specified
 -CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok
 -CREATE MATERIALIZED VIEW mvtest_mv_v_4 (ii) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok
 -ALTER TABLE mvtest_v RENAME COLUMN i TO x;
 -INSERT INTO mvtest_v values (1, 2);
 -CREATE UNIQUE INDEX mvtest_mv_v_ii ON mvtest_mv_v (ii);
 -REFRESH MATERIALIZED VIEW mvtest_mv_v;
 -UPDATE mvtest_v SET j = 3 WHERE x = 1;
 -REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_v;
 -REFRESH MATERIALIZED VIEW mvtest_mv_v_2;
 -REFRESH MATERIALIZED VIEW mvtest_mv_v_3;
 -REFRESH MATERIALIZED VIEW mvtest_mv_v_4;
 -SELECT * FROM mvtest_v;
 +CREATE TABLE v (i int, j int);
 +CREATE MATERIALIZED VIEW mv_v (ii) AS SELECT i, j AS jj FROM v;
 +ALTER TABLE v RENAME COLUMN i TO x;
 +INSERT INTO v values (1, 2);
 +CREATE UNIQUE INDEX mv_v_ii ON mv_v (ii);
 +REFRESH MATERIALIZED VIEW mv_v;
 +UPDATE v SET j = 3 WHERE x = 1;
 +SELECT * FROM v;
   x | j 
  ---+---
   1 | 3
  (1 row)
  
- SELECT * FROM mv_v;
+ SELECT * FROM mvtest_mv_v;
   ii | jj 
  ----+----
 -  1 |  3
 +  1 |  2
  (1 row)
  
- DROP TABLE v CASCADE;
- NOTICE:  drop cascades to materialized view mv_v
+ SELECT * FROM mvtest_mv_v_2;
+  ii | j 
+ ----+---
+   1 | 3
+ (1 row)
+ SELECT * FROM mvtest_mv_v_3;
+  ii | jj 
+ ----+----
+   1 |  3
+ (1 row)
+ SELECT * FROM mvtest_mv_v_4;
+  ii | j 
+ ----+---
+   1 | 3
+ (1 row)
+ DROP TABLE mvtest_v CASCADE;
+ NOTICE:  drop cascades to 4 other objects
+ DETAIL:  drop cascades to materialized view mvtest_mv_v
+ drop cascades to materialized view mvtest_mv_v_2
+ drop cascades to materialized view mvtest_mv_v_3
+ drop cascades to materialized view mvtest_mv_v_4
+ -- make sure that create WITH NO DATA does not plan the query (bug #13907)
+ create materialized view mvtest_error as select 1/0 as x;  -- fail
+ ERROR:  division by zero
+ create materialized view mvtest_error as select 1/0 as x with no data;
+ refresh materialized view mvtest_error;  -- fail here
+ ERROR:  division by zero
+ drop materialized view mvtest_error;
  -- make sure that matview rows can be referenced as source rows (bug #9398)
 -CREATE TABLE mvtest_v AS SELECT generate_series(1,10) AS a;
 -CREATE MATERIALIZED VIEW mvtest_mv_v AS SELECT a FROM mvtest_v WHERE a <= 5;
 -DELETE FROM mvtest_v WHERE EXISTS ( SELECT * FROM mvtest_mv_v WHERE mvtest_mv_v.a = mvtest_v.a );
 -SELECT * FROM mvtest_v;
 +CREATE TABLE v AS SELECT generate_series(1,10) AS a;
 +CREATE MATERIALIZED VIEW mv_v AS SELECT a FROM v WHERE a <= 5;
 +DELETE FROM v WHERE EXISTS ( SELECT * FROM mv_v WHERE mv_v.a = v.a );
 +ERROR:  could not plan this distributed delete
 +DETAIL:  correlated or complex DELETE is currently not supported in Postgres-XL.
 +SELECT * FROM v;
   a  
  ----
 +  1
 +  2
 +  5
    6
 -  7
    8
    9
 +  3
 +  4
 +  7
   10
 -(5 rows)
 +(10 rows)
  
- SELECT * FROM mv_v;
+ SELECT * FROM mvtest_mv_v;
   a 
  ---
   1
   2
 + 5
   3
   4
 - 5
  (5 rows)
  
- DROP TABLE v CASCADE;
- NOTICE:  drop cascades to materialized view mv_v
+ DROP TABLE mvtest_v CASCADE;
+ NOTICE:  drop cascades to materialized view mvtest_mv_v
  -- make sure running as superuser works when MV owned by another role (bug #11208)
- CREATE ROLE user_dw;
- SET ROLE user_dw;
- CREATE TABLE foo_data AS SELECT i, md5(random()::text)
+ CREATE ROLE regress_user_mvtest;
+ SET ROLE regress_user_mvtest;
+ CREATE TABLE mvtest_foo_data AS SELECT i, md5(random()::text)
    FROM generate_series(1, 10) i;
- CREATE MATERIALIZED VIEW mv_foo AS SELECT * FROM foo_data;
- CREATE MATERIALIZED VIEW mv_foo AS SELECT * FROM foo_data;
- ERROR:  relation "mv_foo" already exists
- CREATE MATERIALIZED VIEW IF NOT EXISTS mv_foo AS SELECT * FROM foo_data;
- NOTICE:  relation "mv_foo" already exists, skipping
- CREATE UNIQUE INDEX ON mv_foo (i);
+ CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ ERROR:  relation "mvtest_mv_foo" already exists
+ CREATE MATERIALIZED VIEW IF NOT EXISTS mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ NOTICE:  relation "mvtest_mv_foo" already exists, skipping
+ CREATE UNIQUE INDEX ON mvtest_mv_foo (i);
  RESET ROLE;
 -REFRESH MATERIALIZED VIEW mvtest_mv_foo;
 -REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_foo;
 -DROP OWNED BY regress_user_mvtest CASCADE;
 -DROP ROLE regress_user_mvtest;
 --- make sure that create WITH NO DATA works via SPI
 -BEGIN;
 -CREATE FUNCTION mvtest_func()
 -  RETURNS void AS $$
 -BEGIN
 -  CREATE MATERIALIZED VIEW mvtest1 AS SELECT 1 AS x;
 -  CREATE MATERIALIZED VIEW mvtest2 AS SELECT 1 AS x WITH NO DATA;
 -END;
 -$$ LANGUAGE plpgsql;
 -SELECT mvtest_func();
 - mvtest_func 
 --------------
 - 
 -(1 row)
 -
 -SELECT * FROM mvtest1;
 - x 
 ----
 - 1
 -(1 row)
 -
 -SELECT * FROM mvtest2;
 -ERROR:  materialized view "mvtest2" has not been populated
 -HINT:  Use the REFRESH MATERIALIZED VIEW command.
 -ROLLBACK;
 +REFRESH MATERIALIZED VIEW mv_foo;
 +DROP OWNED BY user_dw CASCADE;
 +DROP ROLE user_dw;
index 83160cd439c31af67d67f2aa8e3e85b26b96215a,7e81cdd087f4e9b780b5d283b1aa74a8aab39845..77450ba3df68fa87b253f663e7a85784bc7b6ee8
@@@ -210,8 -438,8 +216,9 @@@ ERROR:  relation "addr_nsp.genftable" d
  ---
  --- Cleanup resources
  ---
+ SET client_min_messages TO 'warning';
  DROP FOREIGN DATA WRAPPER addr_fdw CASCADE;
 +ERROR:  foreign-data wrapper "addr_fdw" does not exist
  DROP SCHEMA addr_nsp CASCADE;
- DROP OWNED BY regtest_addr_user;
- DROP USER regtest_addr_user;
+ DROP OWNED BY regress_addr_user;
+ DROP USER regress_addr_user;
index cd17367d70e53b0ca0464fd0f4bc3a4dce3286d5,a2c36e44ba0aee3bff52c4884d855cff15cb7c45..96d91fe48671909d8dfb715a8041937a80d01c62
@@@ -2032,16 -2036,13 +2036,14 @@@ begi
    insert into foo values(x);
    return x;
  end$$ language plpgsql;
- set statement_timeout to 2000;
- select blockme();
- NOTICE:  nyeah nyeah, can't stop me
-  blockme 
- ---------
-       20
+ select subxact_rollback_semantics();
+  subxact_rollback_semantics 
+ ----------------------------
+                          20
  (1 row)
  
 -select * from foo;
 +reset statement_timeout;
 +select * from foo order by 1;
   f1 
  ----
    1
index f2356d460e49403175301184f814218b551d5cf3,39a60a56191b77da4d4ea478b6be159e234a6bf5..1b539c3b573f2c02de889d142fe22a4ad6f7b53b
@@@ -172,8 -178,7 +178,9 @@@ Indexes
      "test_replica_identity_hash" hash (nonkey)
      "test_replica_identity_keyab" btree (keya, keyb)
  Replica Identity: FULL
 +Distribute By: REPLICATION
 +Location Nodes: ALL DATANODES
+ Has OIDs: yes
  
  ALTER TABLE test_replica_identity REPLICA IDENTITY NOTHING;
  SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass;
index b7fded9355673d969f5ad6b7686cad6831355d62,570aa5f8343f62b3c8b3389188699002b1f60b2d..f26ddff419a506a598740574a268d3a837e03914
  -- default for superuser is false
 +CREATE ROLE test_def_superuser;
 +SELECT * FROM pg_authid WHERE rolname = 'test_def_superuser';
 +      rolname       | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_def_superuser | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +CREATE ROLE test_superuser WITH SUPERUSER;
 +SELECT * FROM pg_authid WHERE rolname = 'test_superuser';
 +    rolname     | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_superuser | t        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_superuser WITH NOSUPERUSER;
 +SELECT * FROM pg_authid WHERE rolname = 'test_superuser';
 +    rolname     | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_superuser | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_superuser WITH SUPERUSER;
 +SELECT * FROM pg_authid WHERE rolname = 'test_superuser';
 +    rolname     | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_superuser | t        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +-- default for inherit is true
 +CREATE ROLE test_def_inherit;
 +SELECT * FROM pg_authid WHERE rolname = 'test_def_inherit';
 +     rolname      | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_def_inherit | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +CREATE ROLE test_inherit WITH NOINHERIT;
 +SELECT * FROM pg_authid WHERE rolname = 'test_inherit';
 +   rolname    | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_inherit | f        | f          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_inherit WITH INHERIT;
 +SELECT * FROM pg_authid WHERE rolname = 'test_inherit';
 +   rolname    | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_inherit | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_inherit WITH NOINHERIT;
 +SELECT * FROM pg_authid WHERE rolname = 'test_inherit';
 +   rolname    | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_inherit | f        | f          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +-- default for create role is false
 +CREATE ROLE test_def_createrole;
 +SELECT * FROM pg_authid WHERE rolname = 'test_def_createrole';
 +       rolname       | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +---------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_def_createrole | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +CREATE ROLE test_createrole WITH CREATEROLE;
 +SELECT * FROM pg_authid WHERE rolname = 'test_createrole';
 +     rolname     | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +-----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_createrole | f        | t          | t             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_createrole WITH NOCREATEROLE;
 +SELECT * FROM pg_authid WHERE rolname = 'test_createrole';
 +     rolname     | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +-----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_createrole | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_createrole WITH CREATEROLE;
 +SELECT * FROM pg_authid WHERE rolname = 'test_createrole';
 +     rolname     | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +-----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_createrole | f        | t          | t             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +-- default for create database is false
 +CREATE ROLE test_def_createdb;
 +SELECT * FROM pg_authid WHERE rolname = 'test_def_createdb';
 +      rolname      | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +-------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_def_createdb | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +CREATE ROLE test_createdb WITH CREATEDB;
 +SELECT * FROM pg_authid WHERE rolname = 'test_createdb';
 +    rolname    | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +---------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_createdb | f        | t          | f             | t           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_createdb WITH NOCREATEDB;
 +SELECT * FROM pg_authid WHERE rolname = 'test_createdb';
 +    rolname    | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +---------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_createdb | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_createdb WITH CREATEDB;
 +SELECT * FROM pg_authid WHERE rolname = 'test_createdb';
 +    rolname    | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +---------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_createdb | f        | t          | f             | t           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +-- default for can login is false for role
 +CREATE ROLE test_def_role_canlogin;
 +SELECT * FROM pg_authid WHERE rolname = 'test_def_role_canlogin';
 +        rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_def_role_canlogin | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +CREATE ROLE test_role_canlogin WITH LOGIN;
 +SELECT * FROM pg_authid WHERE rolname = 'test_role_canlogin';
 +      rolname       | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_role_canlogin | f        | t          | f             | f           | t           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_role_canlogin WITH NOLOGIN;
 +SELECT * FROM pg_authid WHERE rolname = 'test_role_canlogin';
 +      rolname       | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_role_canlogin | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_role_canlogin WITH LOGIN;
 +SELECT * FROM pg_authid WHERE rolname = 'test_role_canlogin';
 +      rolname       | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_role_canlogin | f        | t          | f             | f           | t           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +-- default for can login is true for user
 +CREATE USER test_def_user_canlogin;
 +SELECT * FROM pg_authid WHERE rolname = 'test_def_user_canlogin';
 +        rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_def_user_canlogin | f        | t          | f             | f           | t           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +CREATE USER test_user_canlogin WITH NOLOGIN;
 +SELECT * FROM pg_authid WHERE rolname = 'test_user_canlogin';
 +      rolname       | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_user_canlogin | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER USER test_user_canlogin WITH LOGIN;
 +SELECT * FROM pg_authid WHERE rolname = 'test_user_canlogin';
 +      rolname       | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_user_canlogin | f        | t          | f             | f           | t           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER USER test_user_canlogin WITH NOLOGIN;
 +SELECT * FROM pg_authid WHERE rolname = 'test_user_canlogin';
 +      rolname       | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_user_canlogin | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +-- default for replication is false
 +CREATE ROLE test_def_replication;
 +SELECT * FROM pg_authid WHERE rolname = 'test_def_replication';
 +       rolname        | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_def_replication | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +CREATE ROLE test_replication WITH REPLICATION;
 +SELECT * FROM pg_authid WHERE rolname = 'test_replication';
 +     rolname      | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_replication | f        | t          | f             | f           | f           | t              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_replication WITH NOREPLICATION;
 +SELECT * FROM pg_authid WHERE rolname = 'test_replication';
 +     rolname      | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_replication | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_replication WITH REPLICATION;
 +SELECT * FROM pg_authid WHERE rolname = 'test_replication';
 +     rolname      | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_replication | f        | t          | f             | f           | f           | t              | f            |           -1 |             | 
 +(1 row)
 +
 +-- default for bypassrls is false
 +CREATE ROLE test_def_bypassrls;
 +SELECT * FROM pg_authid WHERE rolname = 'test_def_bypassrls';
 +      rolname       | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +--------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_def_bypassrls | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +CREATE ROLE test_bypassrls WITH BYPASSRLS;
 +SELECT * FROM pg_authid WHERE rolname = 'test_bypassrls';
 +    rolname     | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_bypassrls | f        | t          | f             | f           | f           | f              | t            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_bypassrls WITH NOBYPASSRLS;
 +SELECT * FROM pg_authid WHERE rolname = 'test_bypassrls';
 +    rolname     | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_bypassrls | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
 +(1 row)
 +
 +ALTER ROLE test_bypassrls WITH BYPASSRLS;
 +SELECT * FROM pg_authid WHERE rolname = 'test_bypassrls';
 +    rolname     | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
 +----------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
 + test_bypassrls | f        | t          | f             | f           | f           | f              | t            |           -1 |             | 
 +(1 row)
 +
 +-- remove the one role with LOGIN rights
 +DROP ROLE test_role_canlogin;
 +-- other roles not removed to test pg_dumpall role dump through
 +-- pg_upgrade
+ CREATE ROLE regress_test_def_superuser;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_superuser';
+           rolname           | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_def_superuser | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ CREATE ROLE regress_test_superuser WITH SUPERUSER;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_superuser';
+         rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_superuser | t        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_superuser WITH NOSUPERUSER;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_superuser';
+         rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_superuser | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_superuser WITH SUPERUSER;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_superuser';
+         rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_superuser | t        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ -- default for inherit is true
+ CREATE ROLE regress_test_def_inherit;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_inherit';
+          rolname          | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ --------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_def_inherit | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ CREATE ROLE regress_test_inherit WITH NOINHERIT;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_inherit';
+        rolname        | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_inherit | f        | f          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_inherit WITH INHERIT;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_inherit';
+        rolname        | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_inherit | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_inherit WITH NOINHERIT;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_inherit';
+        rolname        | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_inherit | f        | f          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ -- default for create role is false
+ CREATE ROLE regress_test_def_createrole;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_createrole';
+            rolname           | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ -----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_def_createrole | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ CREATE ROLE regress_test_createrole WITH CREATEROLE;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createrole';
+          rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ -------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_createrole | f        | t          | t             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_createrole WITH NOCREATEROLE;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createrole';
+          rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ -------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_createrole | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_createrole WITH CREATEROLE;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createrole';
+          rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ -------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_createrole | f        | t          | t             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ -- default for create database is false
+ CREATE ROLE regress_test_def_createdb;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_createdb';
+           rolname          | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ---------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_def_createdb | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ CREATE ROLE regress_test_createdb WITH CREATEDB;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createdb';
+         rolname        | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ -----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_createdb | f        | t          | f             | t           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_createdb WITH NOCREATEDB;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createdb';
+         rolname        | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ -----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_createdb | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_createdb WITH CREATEDB;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_createdb';
+         rolname        | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ -----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_createdb | f        | t          | f             | t           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ -- default for can login is false for role
+ CREATE ROLE regress_test_def_role_canlogin;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_role_canlogin';
+             rolname             | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ --------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_def_role_canlogin | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ CREATE ROLE regress_test_role_canlogin WITH LOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
+           rolname           | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_role_canlogin | f        | t          | f             | f           | t           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_role_canlogin WITH NOLOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
+           rolname           | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_role_canlogin | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_role_canlogin WITH LOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_role_canlogin';
+           rolname           | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_role_canlogin | f        | t          | f             | f           | t           | f              | f            |           -1 |             | 
+ (1 row)
+ -- default for can login is true for user
+ CREATE USER regress_test_def_user_canlogin;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_user_canlogin';
+             rolname             | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ --------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_def_user_canlogin | f        | t          | f             | f           | t           | f              | f            |           -1 |             | 
+ (1 row)
+ CREATE USER regress_test_user_canlogin WITH NOLOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
+           rolname           | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_user_canlogin | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER USER regress_test_user_canlogin WITH LOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
+           rolname           | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_user_canlogin | f        | t          | f             | f           | t           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER USER regress_test_user_canlogin WITH NOLOGIN;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_user_canlogin';
+           rolname           | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_user_canlogin | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ -- default for replication is false
+ CREATE ROLE regress_test_def_replication;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_replication';
+            rolname            | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_def_replication | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ CREATE ROLE regress_test_replication WITH REPLICATION;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_replication';
+          rolname          | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ --------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_replication | f        | t          | f             | f           | f           | t              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_replication WITH NOREPLICATION;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_replication';
+          rolname          | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ --------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_replication | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_replication WITH REPLICATION;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_replication';
+          rolname          | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ --------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_replication | f        | t          | f             | f           | f           | t              | f            |           -1 |             | 
+ (1 row)
+ -- default for bypassrls is false
+ CREATE ROLE regress_test_def_bypassrls;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_def_bypassrls';
+           rolname           | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_def_bypassrls | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ CREATE ROLE regress_test_bypassrls WITH BYPASSRLS;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
+         rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_bypassrls | f        | t          | f             | f           | f           | f              | t            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_bypassrls WITH NOBYPASSRLS;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
+         rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_bypassrls | f        | t          | f             | f           | f           | f              | f            |           -1 |             | 
+ (1 row)
+ ALTER ROLE regress_test_bypassrls WITH BYPASSRLS;
+ SELECT * FROM pg_authid WHERE rolname = 'regress_test_bypassrls';
+         rolname         | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+ ------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+---------------
+  regress_test_bypassrls | f        | t          | f             | f           | f           | f              | t            |           -1 |             | 
+ (1 row)
+ -- clean up roles
+ DROP ROLE regress_test_def_superuser;
+ DROP ROLE regress_test_superuser;
+ DROP ROLE regress_test_def_inherit;
+ DROP ROLE regress_test_inherit;
+ DROP ROLE regress_test_def_createrole;
+ DROP ROLE regress_test_createrole;
+ DROP ROLE regress_test_def_createdb;
+ DROP ROLE regress_test_createdb;
+ DROP ROLE regress_test_def_role_canlogin;
+ DROP ROLE regress_test_role_canlogin;
+ DROP USER regress_test_def_user_canlogin;
+ DROP USER regress_test_user_canlogin;
+ DROP ROLE regress_test_def_replication;
+ DROP ROLE regress_test_replication;
+ DROP ROLE regress_test_def_bypassrls;
+ DROP ROLE regress_test_bypassrls;
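The checks above all follow one pattern: create a role, read its attribute flags back from the catalog, then toggle each flag with ALTER ROLE. A minimal hand-run sketch of that pattern, using a hypothetical role name demo_role (not part of the test) and the world-readable pg_roles view instead of pg_authid, which requires superuser access:

-- Hypothetical scratch role; assumes no role named demo_role exists yet.
CREATE ROLE demo_role;                        -- every attribute takes its default
SELECT rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls
  FROM pg_roles WHERE rolname = 'demo_role';  -- all false by default
ALTER ROLE demo_role WITH CREATEDB LOGIN;     -- flip two attributes on
SELECT rolcreatedb, rolcanlogin
  FROM pg_roles WHERE rolname = 'demo_role';  -- both now true
ALTER ROLE demo_role WITH NOCREATEDB NOLOGIN; -- and back off again
DROP ROLE demo_role;                          -- clean up
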
index 78d9f54a370957b53ba55d342309c294b52ecf5b,c15bf958a51732745cb95839bd1ebd6a4426dc90..16a2eda3c9eda2d31aafbfd90bf0941fc790fdae
@@@ -76,62 -72,90 +72,62 @@@ ALTER TABLE document ENABLE ROW LEVEL S
  -- user's security level must be higher than or equal to document's
  CREATE POLICY p1 ON document
      USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user));
- -- viewpoint from rls_regress_user1
- SET SESSION AUTHORIZATION rls_regress_user1;
+ -- viewpoint from regress_rls_bob
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SET row_security TO ON;
  SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did;
 -NOTICE:  f_leak => my first novel
 -NOTICE:  f_leak => my first manga
 -NOTICE:  f_leak => great science fiction
 -NOTICE:  f_leak => great manga
   did | cid | dlevel |      dauthor      |        dtitle         
  -----+-----+--------+-------------------+-----------------------
-    1 |  11 |      1 | rls_regress_user1 | my first novel
-    4 |  44 |      1 | rls_regress_user1 | my first manga
-    6 |  22 |      1 | rls_regress_user2 | great science fiction
-    8 |  44 |      1 | rls_regress_user2 | great manga
+    1 |  11 |      1 | regress_rls_bob   | my first novel
+    4 |  44 |      1 | regress_rls_bob   | my first manga
+    6 |  22 |      1 | regress_rls_carol | great science fiction
+    8 |  44 |      1 | regress_rls_carol | great manga
  (4 rows)
  
  SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did;
 -NOTICE:  f_leak => my first novel
 -NOTICE:  f_leak => my first manga
 -NOTICE:  f_leak => great science fiction
 -NOTICE:  f_leak => great manga
   cid | did | dlevel |      dauthor      |        dtitle         |      cname      
  -----+-----+--------+-------------------+-----------------------+-----------------
-   11 |   1 |      1 | rls_regress_user1 | my first novel        | novel
-   44 |   4 |      1 | rls_regress_user1 | my first manga        | manga
-   22 |   6 |      1 | rls_regress_user2 | great science fiction | science fiction
-   44 |   8 |      1 | rls_regress_user2 | great manga           | manga
+   11 |   1 |      1 | regress_rls_bob   | my first novel        | novel
+   44 |   4 |      1 | regress_rls_bob   | my first manga        | manga
+   22 |   6 |      1 | regress_rls_carol | great science fiction | science fiction
+   44 |   8 |      1 | regress_rls_carol | great manga           | manga
  (4 rows)
  
  -- try a sampled version
  SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0)
    WHERE f_leak(dtitle) ORDER BY did;
 -NOTICE:  f_leak => my first manga
 -NOTICE:  f_leak => great science fiction
 -NOTICE:  f_leak => great manga
   did | cid | dlevel |      dauthor      |        dtitle         
  -----+-----+--------+-------------------+-----------------------
 -   4 |  44 |      1 | regress_rls_bob   | my first manga
 -   6 |  22 |      1 | regress_rls_carol | great science fiction
 -   8 |  44 |      1 | regress_rls_carol | great manga
 -(3 rows)
 +   6 |  22 |      1 | rls_regress_user2 | great science fiction
 +   8 |  44 |      1 | rls_regress_user2 | great manga
 +(2 rows)
  
- -- viewpoint from rls_regress_user2
- SET SESSION AUTHORIZATION rls_regress_user2;
+ -- viewpoint from regress_rls_carol
+ SET SESSION AUTHORIZATION regress_rls_carol;
  SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did;
 -NOTICE:  f_leak => my first novel
 -NOTICE:  f_leak => my second novel
 -NOTICE:  f_leak => my science fiction
 -NOTICE:  f_leak => my first manga
 -NOTICE:  f_leak => my second manga
 -NOTICE:  f_leak => great science fiction
 -NOTICE:  f_leak => great technology book
 -NOTICE:  f_leak => great manga
   did | cid | dlevel |      dauthor      |        dtitle         
  -----+-----+--------+-------------------+-----------------------
-    1 |  11 |      1 | rls_regress_user1 | my first novel
-    2 |  11 |      2 | rls_regress_user1 | my second novel
-    3 |  22 |      2 | rls_regress_user1 | my science fiction
-    4 |  44 |      1 | rls_regress_user1 | my first manga
-    5 |  44 |      2 | rls_regress_user1 | my second manga
-    6 |  22 |      1 | rls_regress_user2 | great science fiction
-    7 |  33 |      2 | rls_regress_user2 | great technology book
-    8 |  44 |      1 | rls_regress_user2 | great manga
+    1 |  11 |      1 | regress_rls_bob   | my first novel
+    2 |  11 |      2 | regress_rls_bob   | my second novel
+    3 |  22 |      2 | regress_rls_bob   | my science fiction
+    4 |  44 |      1 | regress_rls_bob   | my first manga
+    5 |  44 |      2 | regress_rls_bob   | my second manga
+    6 |  22 |      1 | regress_rls_carol | great science fiction
+    7 |  33 |      2 | regress_rls_carol | great technology book
+    8 |  44 |      1 | regress_rls_carol | great manga
  (8 rows)
  
  SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did;
 -NOTICE:  f_leak => my first novel
 -NOTICE:  f_leak => my second novel
 -NOTICE:  f_leak => my science fiction
 -NOTICE:  f_leak => my first manga
 -NOTICE:  f_leak => my second manga
 -NOTICE:  f_leak => great science fiction
 -NOTICE:  f_leak => great technology book
 -NOTICE:  f_leak => great manga
   cid | did | dlevel |      dauthor      |        dtitle         |      cname      
  -----+-----+--------+-------------------+-----------------------+-----------------
-   11 |   1 |      1 | rls_regress_user1 | my first novel        | novel
-   11 |   2 |      2 | rls_regress_user1 | my second novel       | novel
-   22 |   3 |      2 | rls_regress_user1 | my science fiction    | science fiction
-   44 |   4 |      1 | rls_regress_user1 | my first manga        | manga
-   44 |   5 |      2 | rls_regress_user1 | my second manga       | manga
-   22 |   6 |      1 | rls_regress_user2 | great science fiction | science fiction
-   33 |   7 |      2 | rls_regress_user2 | great technology book | technology
-   44 |   8 |      1 | rls_regress_user2 | great manga           | manga
+   11 |   1 |      1 | regress_rls_bob   | my first novel        | novel
+   11 |   2 |      2 | regress_rls_bob   | my second novel       | novel
+   22 |   3 |      2 | regress_rls_bob   | my science fiction    | science fiction
+   44 |   4 |      1 | regress_rls_bob   | my first manga        | manga
+   44 |   5 |      2 | regress_rls_bob   | my second manga       | manga
+   22 |   6 |      1 | regress_rls_carol | great science fiction | science fiction
+   33 |   7 |      2 | regress_rls_carol | great technology book | technology
+   44 |   8 |      1 | regress_rls_carol | great manga           | manga
  (8 rows)
  
  -- try a sampled version
@@@ -180,46 -206,62 +176,46 @@@ ALTER POLICY p1 ON document USING (true
  ERROR:  must be owner of relation document
  DROP POLICY p1 ON document;                  --fail
  ERROR:  must be owner of relation document
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  ALTER POLICY p1 ON document USING (dauthor = current_user);
- -- viewpoint from rls_regress_user1 again
- SET SESSION AUTHORIZATION rls_regress_user1;
+ -- viewpoint from regress_rls_bob again
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did;
 -NOTICE:  f_leak => my first novel
 -NOTICE:  f_leak => my second novel
 -NOTICE:  f_leak => my science fiction
 -NOTICE:  f_leak => my first manga
 -NOTICE:  f_leak => my second manga
 - did | cid | dlevel |     dauthor     |       dtitle       
 ------+-----+--------+-----------------+--------------------
 -   1 |  11 |      1 | regress_rls_bob | my first novel
 -   2 |  11 |      2 | regress_rls_bob | my second novel
 -   3 |  22 |      2 | regress_rls_bob | my science fiction
 -   4 |  44 |      1 | regress_rls_bob | my first manga
 -   5 |  44 |      2 | regress_rls_bob | my second manga
 + did | cid | dlevel |      dauthor      |       dtitle       
 +-----+-----+--------+-------------------+--------------------
 +   1 |  11 |      1 | rls_regress_user1 | my first novel
 +   2 |  11 |      2 | rls_regress_user1 | my second novel
 +   3 |  22 |      2 | rls_regress_user1 | my science fiction
 +   4 |  44 |      1 | rls_regress_user1 | my first manga
 +   5 |  44 |      2 | rls_regress_user1 | my second manga
  (5 rows)
  
  SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER by did;
 -NOTICE:  f_leak => my first novel
 -NOTICE:  f_leak => my second novel
 -NOTICE:  f_leak => my science fiction
 -NOTICE:  f_leak => my first manga
 -NOTICE:  f_leak => my second manga
 - cid | did | dlevel |     dauthor     |       dtitle       |      cname      
 ------+-----+--------+-----------------+--------------------+-----------------
 -  11 |   1 |      1 | regress_rls_bob | my first novel     | novel
 -  11 |   2 |      2 | regress_rls_bob | my second novel    | novel
 -  22 |   3 |      2 | regress_rls_bob | my science fiction | science fiction
 -  44 |   4 |      1 | regress_rls_bob | my first manga     | manga
 -  44 |   5 |      2 | regress_rls_bob | my second manga    | manga
 + cid | did | dlevel |      dauthor      |       dtitle       |      cname      
 +-----+-----+--------+-------------------+--------------------+-----------------
 +  11 |   1 |      1 | rls_regress_user1 | my first novel     | novel
 +  11 |   2 |      2 | rls_regress_user1 | my second novel    | novel
 +  22 |   3 |      2 | rls_regress_user1 | my science fiction | science fiction
 +  44 |   4 |      1 | rls_regress_user1 | my first manga     | manga
 +  44 |   5 |      2 | rls_regress_user1 | my second manga    | manga
  (5 rows)
  
- -- viewpoint from rls_regres_user2 again
- SET SESSION AUTHORIZATION rls_regress_user2;
 + -- viewpoint from regress_rls_carol again
+ SET SESSION AUTHORIZATION regress_rls_carol;
  SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did;
 -NOTICE:  f_leak => great science fiction
 -NOTICE:  f_leak => great technology book
 -NOTICE:  f_leak => great manga
   did | cid | dlevel |      dauthor      |        dtitle         
  -----+-----+--------+-------------------+-----------------------
-    6 |  22 |      1 | rls_regress_user2 | great science fiction
-    7 |  33 |      2 | rls_regress_user2 | great technology book
-    8 |  44 |      1 | rls_regress_user2 | great manga
+    6 |  22 |      1 | regress_rls_carol | great science fiction
+    7 |  33 |      2 | regress_rls_carol | great technology book
+    8 |  44 |      1 | regress_rls_carol | great manga
  (3 rows)
  
  SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER by did;
 -NOTICE:  f_leak => great science fiction
 -NOTICE:  f_leak => great technology book
 -NOTICE:  f_leak => great manga
   cid | did | dlevel |      dauthor      |        dtitle         |      cname      
  -----+-----+--------+-------------------+-----------------------+-----------------
-   22 |   6 |      1 | rls_regress_user2 | great science fiction | science fiction
-   33 |   7 |      2 | rls_regress_user2 | great technology book | technology
-   44 |   8 |      1 | rls_regress_user2 | great manga           | manga
+   22 |   6 |      1 | regress_rls_carol | great science fiction | science fiction
+   33 |   7 |      2 | regress_rls_carol | great technology book | technology
+   44 |   8 |      1 | regress_rls_carol | great manga           | manga
  (3 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle);
           Filter: f_leak(document.dtitle)
           ->  Seq Scan on document document_1
                 Filter: (dauthor = "current_user"())
 -   ->  Index Scan using category_pkey on category
 -         Index Cond: (cid = document.cid)
 -(7 rows)
 +(5 rows)
 +
 +EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle);
 +                        QUERY PLAN                        
 +----------------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Nested Loop
 +         ->  Subquery Scan on document
 +               Filter: f_leak(document.dtitle)
 +               ->  Seq Scan on document document_1
 +                     Filter: (dauthor = "current_user"())
 +         ->  Index Scan using category_pkey on category
 +               Index Cond: (cid = document.cid)
 +(8 rows)
  
  -- interaction of FK/PK constraints
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  CREATE POLICY p2 ON category
-     USING (CASE WHEN current_user = 'rls_regress_user1' THEN cid IN (11, 33)
-            WHEN current_user = 'rls_regress_user2' THEN cid IN (22, 44)
+     USING (CASE WHEN current_user = 'regress_rls_bob' THEN cid IN (11, 33)
+            WHEN current_user = 'regress_rls_carol' THEN cid IN (22, 44)
             ELSE false END);
  ALTER TABLE category ENABLE ROW LEVEL SECURITY;
  -- cannot delete PK referenced by invisible FK
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid;
 - did | cid | dlevel |     dauthor     |       dtitle       | cid |   cname    
 ------+-----+--------+-----------------+--------------------+-----+------------
 -   2 |  11 |      2 | regress_rls_bob | my second novel    |  11 | novel
 -   1 |  11 |      1 | regress_rls_bob | my first novel     |  11 | novel
 -     |     |        |                 |                    |  33 | technology
 -   5 |  44 |      2 | regress_rls_bob | my second manga    |     | 
 -   4 |  44 |      1 | regress_rls_bob | my first manga     |     | 
 -   3 |  22 |      2 | regress_rls_bob | my science fiction |     | 
 + did | cid | dlevel |      dauthor      |       dtitle       | cid |   cname    
 +-----+-----+--------+-------------------+--------------------+-----+------------
 +   4 |  44 |      1 | rls_regress_user1 | my first manga     |     | 
 +   5 |  44 |      2 | rls_regress_user1 | my second manga    |     | 
 +   2 |  11 |      2 | rls_regress_user1 | my second novel    |  11 | novel
 +   1 |  11 |      1 | rls_regress_user1 | my first novel     |  11 | novel
 +     |     |        |                   |                    |  33 | technology
 +   3 |  22 |      2 | rls_regress_user1 | my science fiction |     | 
  (6 rows)
  
  DELETE FROM category WHERE cid = 33;    -- fails with FK violation
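The failing DELETE above shows that row-level security does not weaken referential integrity: the foreign-key check runs with the referencing table owner's rights, so a child row that is invisible to the current user still blocks deletion of its parent. A standalone sketch of the same effect, with the hypothetical names demo_parent, demo_child, demo_cp and demo_fk_user:

-- Run as a superuser; all names below are hypothetical.
CREATE TABLE demo_parent (id int PRIMARY KEY);
CREATE TABLE demo_child  (parent_id int REFERENCES demo_parent, owner name);
INSERT INTO demo_parent VALUES (1);
INSERT INTO demo_child  VALUES (1, 'somebody_else');
ALTER TABLE demo_child ENABLE ROW LEVEL SECURITY;
CREATE POLICY demo_cp ON demo_child USING (owner = current_user);
CREATE ROLE demo_fk_user LOGIN;
GRANT SELECT, DELETE ON demo_parent TO demo_fk_user;
GRANT SELECT ON demo_child TO demo_fk_user;
SET SESSION AUTHORIZATION demo_fk_user;
SELECT * FROM demo_child;               -- empty: the referencing row is invisible
DELETE FROM demo_parent WHERE id = 1;   -- still fails with a foreign-key violation
RESET SESSION AUTHORIZATION;
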
@@@ -608,49 -632,55 +604,69 @@@ EXPLAIN (COSTS OFF) SELECT * FROM t1 WH
           ->  LockRows
                 ->  Result
                       ->  Append
 -                           ->  Seq Scan on t1 t1_1
 -                                 Filter: ((a % 2) = 0)
 -                           ->  Seq Scan on t2
 -                                 Filter: ((a % 2) = 0)
 -                           ->  Seq Scan on t3
 -                                 Filter: ((a % 2) = 0)
 -(12 rows)
 +                           ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +                                 ->  Seq Scan on t1 t1_1
 +                                       Filter: ((a % 2) = 0)
 +                           ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +                                 ->  Seq Scan on t2
 +                                       Filter: ((a % 2) = 0)
 +                           ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +                                 ->  Seq Scan on t3
 +                                       Filter: ((a % 2) = 0)
 +(15 rows)
 +
 +-- union all query
 +SELECT a, b, oid FROM t2 UNION ALL SELECT a, b, oid FROM t3;
 + a |  b  | oid 
 +---+-----+-----
 + 1 | abc | 201
 + 3 | cde | 203
 + 1 | xxx | 301
 + 2 | yyy | 302
 + 3 | zzz | 303
 +(5 rows)
 +
 +EXPLAIN (COSTS OFF) SELECT a, b, oid FROM t2 UNION ALL SELECT a, b, oid FROM t3;
 +                        QUERY PLAN                         
 +-----------------------------------------------------------
 + Append
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  Seq Scan on t2
 +               Filter: ((a % 2) = 1)
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  Seq Scan on t3
 +(6 rows)
  
+ -- union all query
+ SELECT a, b, oid FROM t2 UNION ALL SELECT a, b, oid FROM t3;
+  a |  b  | oid 
+ ---+-----+-----
+  1 | abc | 201
+  3 | cde | 203
+  1 | xxx | 301
+  2 | yyy | 302
+  3 | zzz | 303
+ (5 rows)
+ EXPLAIN (COSTS OFF) SELECT a, b, oid FROM t2 UNION ALL SELECT a, b, oid FROM t3;
+           QUERY PLAN           
+ -------------------------------
+  Append
+    ->  Seq Scan on t2
+          Filter: ((a % 2) = 1)
+    ->  Seq Scan on t3
+ (4 rows)
  -- superuser is allowed to bypass RLS checks
  RESET SESSION AUTHORIZATION;
  SET row_security TO OFF;
  SELECT * FROM t1 WHERE f_leak(b);
 -NOTICE:  f_leak => aba
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => ccc
 -NOTICE:  f_leak => dad
 -NOTICE:  f_leak => abc
 -NOTICE:  f_leak => bcd
 -NOTICE:  f_leak => cde
 -NOTICE:  f_leak => def
 -NOTICE:  f_leak => xxx
 -NOTICE:  f_leak => yyy
 -NOTICE:  f_leak => zzz
   a |  b  
  ---+-----
-  1 | aaa
+  1 | aba
   2 | bbb
   3 | ccc
-  4 | ddd
+  4 | dad
   1 | abc
   2 | bcd
   3 | cde
  (11 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b);
 -        QUERY PLAN         
 ----------------------------
 +                        QUERY PLAN                         
 +-----------------------------------------------------------
   Append
 -   ->  Seq Scan on t1
 -         Filter: f_leak(b)
 -   ->  Seq Scan on t2
 -         Filter: f_leak(b)
 -   ->  Seq Scan on t3
 -         Filter: f_leak(b)
 -(7 rows)
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  Seq Scan on t1
 +               Filter: f_leak(b)
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  Seq Scan on t2
 +               Filter: f_leak(b)
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  Seq Scan on t3
 +               Filter: f_leak(b)
 +(10 rows)
  
  -- non-superuser with bypass privilege can bypass RLS policy when disabled
- SET SESSION AUTHORIZATION rls_regress_exempt_user;
+ SET SESSION AUTHORIZATION regress_rls_exempt_user;
  SET row_security TO OFF;
  SELECT * FROM t1 WHERE f_leak(b);
 -NOTICE:  f_leak => aba
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => ccc
 -NOTICE:  f_leak => dad
 -NOTICE:  f_leak => abc
 -NOTICE:  f_leak => bcd
 -NOTICE:  f_leak => cde
 -NOTICE:  f_leak => def
 -NOTICE:  f_leak => xxx
 -NOTICE:  f_leak => yyy
 -NOTICE:  f_leak => zzz
   a |  b  
  ---+-----
-  1 | aaa
+  1 | aba
   2 | bbb
   3 | ccc
-  4 | ddd
+  4 | dad
   1 | abc
   2 | bcd
   3 | cde
  (11 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b);
 -        QUERY PLAN         
 ----------------------------
 +                        QUERY PLAN                         
 +-----------------------------------------------------------
   Append
 -   ->  Seq Scan on t1
 -         Filter: f_leak(b)
 -   ->  Seq Scan on t2
 -         Filter: f_leak(b)
 -   ->  Seq Scan on t3
 -         Filter: f_leak(b)
 -(7 rows)
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  Seq Scan on t1
 +               Filter: f_leak(b)
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  Seq Scan on t2
 +               Filter: f_leak(b)
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  Seq Scan on t3
 +               Filter: f_leak(b)
 +(10 rows)
  
  ----- Dependencies -----
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  SET row_security TO ON;
  CREATE TABLE dependee (x integer, y integer);
  CREATE TABLE dependent (x integer, y integer);
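The two bypass cases earlier in this hunk (a superuser after RESET SESSION AUTHORIZATION, and the BYPASSRLS role regress_rls_exempt_user, both with row_security set to off) reduce to a small standalone sketch; demo_secret, demo_none, demo_plain and demo_exempt are hypothetical names, not part of the test:

-- Run as a superuser; SET SESSION AUTHORIZATION needs superuser rights.
CREATE TABLE demo_secret (v text);
INSERT INTO demo_secret VALUES ('row 1'), ('row 2');
ALTER TABLE demo_secret ENABLE ROW LEVEL SECURITY;
CREATE POLICY demo_none ON demo_secret USING (false);  -- hides every row from ordinary users
CREATE ROLE demo_plain  LOGIN;
CREATE ROLE demo_exempt LOGIN BYPASSRLS;
GRANT SELECT ON demo_secret TO demo_plain, demo_exempt;
SET SESSION AUTHORIZATION demo_plain;
SELECT count(*) FROM demo_secret;        -- 0: the policy filters everything out
RESET SESSION AUTHORIZATION;
SET SESSION AUTHORIZATION demo_exempt;
SET row_security TO OFF;
SELECT count(*) FROM demo_secret;        -- 2: the BYPASSRLS role skips the policy
RESET SESSION AUTHORIZATION;
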
@@@ -756,15 -789,13 +772,15 @@@ ERROR:  infinite recursion detected in 
  --
  -- Mutual recursion via views
  --
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  CREATE VIEW rec1v AS SELECT * FROM rec1;
  CREATE VIEW rec2v AS SELECT * FROM rec2;
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  ALTER POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y));
 +ERROR:  relation "rec2v" does not exist
  ALTER POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1v WHERE y = b));
 -SET SESSION AUTHORIZATION regress_rls_bob;
 +ERROR:  relation "rec1v" does not exist
 +SET SESSION AUTHORIZATION rls_regress_user1;
  SELECT * FROM rec1;    -- fail, mutual recursion via views
  ERROR:  infinite recursion detected in policy for relation "rec1"
  --
@@@ -777,12 -808,10 +793,12 @@@ DROP VIEW rec1v, rec2v CASCADE
  RESET client_min_messages;
  CREATE VIEW rec1v WITH (security_barrier) AS SELECT * FROM rec1;
  CREATE VIEW rec2v WITH (security_barrier) AS SELECT * FROM rec2;
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  CREATE POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y));
 +ERROR:  policy "r1" for relation "rec1" already exists
  CREATE POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1v WHERE y = b));
 -SET SESSION AUTHORIZATION regress_rls_bob;
 +ERROR:  policy "r2" for relation "rec2" already exists
 +SET SESSION AUTHORIZATION rls_regress_user1;
  SELECT * FROM rec1;    -- fail, mutual recursion via s.b. views
  ERROR:  infinite recursion detected in policy for relation "rec1"
  --
@@@ -805,11 -834,13 +821,11 @@@ SELECT * FROM s1 WHERE f_leak(b); -- fa
  ERROR:  infinite recursion detected in policy for relation "s1"
  INSERT INTO s1 VALUES (1, 'foo'); -- fail (infinite recursion)
  ERROR:  infinite recursion detected in policy for relation "s1"
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  DROP POLICY p3 on s1;
  ALTER POLICY p2 ON s2 USING (x % 2 = 0);
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM s1 WHERE f_leak(b);     -- OK
 -NOTICE:  f_leak => c81e728d9d4c2f636f067f89cc14862c
 -NOTICE:  f_leak => a87ff679a2f3e71d9181a67b7542122c
   a |                b                 
  ---+----------------------------------
   2 | c81e728d9d4c2f636f067f89cc14862c
  (2 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM only s1 WHERE f_leak(b);
 -                        QUERY PLAN                        
 -----------------------------------------------------------
 - Subquery Scan on s1
 -   Filter: f_leak(s1.b)
 -   ->  Hash Join
 -         Hash Cond: (s1_1.a = s2.x)
 -         ->  Seq Scan on s1 s1_1
 -         ->  Hash
 -               ->  HashAggregate
 -                     Group Key: s2.x
 -                     ->  Subquery Scan on s2
 -                           Filter: (s2.y ~~ '%2f%'::text)
 -                           ->  Seq Scan on s2 s2_1
 -                                 Filter: ((x % 2) = 0)
 -(12 rows)
 +                           QUERY PLAN                           
 +----------------------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Subquery Scan on s1
 +         Filter: f_leak(s1.b)
 +         ->  Hash Join
 +               Hash Cond: (s1_1.a = s2.x)
 +               ->  Seq Scan on s1 s1_1
 +               ->  Hash
 +                     ->  HashAggregate
 +                           Group Key: s2.x
 +                           ->  Subquery Scan on s2
 +                                 Filter: (s2.y ~~ '%2f%'::text)
 +                                 ->  Seq Scan on s2 s2_1
 +                                       Filter: ((x % 2) = 0)
 +(13 rows)
  
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  ALTER POLICY p1 ON s1 USING (a in (select x from v2)); -- using VIEW in RLS policy
 -SET SESSION AUTHORIZATION regress_rls_bob;
 +ERROR:  relation "v2" does not exist
 +SET SESSION AUTHORIZATION rls_regress_user1;
  SELECT * FROM s1 WHERE f_leak(b);     -- OK
 -NOTICE:  f_leak => 0267aaf632e87a63288a08331f22c7c3
 -NOTICE:  f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
 - a  |                b                 
 -----+----------------------------------
 - -4 | 0267aaf632e87a63288a08331f22c7c3
 -  6 | 1679091c5a880faf6fb5e6087eb1b2dc
 + a |                b                 
 +---+----------------------------------
 + 2 | c81e728d9d4c2f636f067f89cc14862c
 + 4 | a87ff679a2f3e71d9181a67b7542122c
  (2 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM s1 WHERE f_leak(b);
@@@ -872,34 -902,31 +888,34 @@@ SELECT (SELECT x FROM s1 LIMIT 1) xx, 
  (3 rows)
  
  EXPLAIN (COSTS OFF) SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%';
 -                             QUERY PLAN                             
 ---------------------------------------------------------------------
 - Subquery Scan on s2
 -   Filter: (s2.y ~~ '%28%'::text)
 -   ->  Seq Scan on s2 s2_1
 -         Filter: ((x % 2) = 0)
 -   SubPlan 1
 -     ->  Limit
 -           ->  Subquery Scan on s1
 -                 ->  Nested Loop Semi Join
 -                       Join Filter: (s1_1.a = s2_2.x)
 -                       ->  Seq Scan on s1 s1_1
 -                       ->  Materialize
 -                             ->  Subquery Scan on s2_2
 -                                   Filter: (s2_2.y ~~ '%af%'::text)
 -                                   ->  Seq Scan on s2 s2_3
 -                                         Filter: ((x % 2) = 0)
 -(15 rows)
 +                                      QUERY PLAN                                      
 +--------------------------------------------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Subquery Scan on s2
 +         Filter: (s2.y ~~ '%28%'::text)
 +         ->  Seq Scan on s2 s2_1
 +               Filter: ((x % 2) = 0)
 +         SubPlan 1
 +           ->  Limit
 +                 ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +                       ->  Limit
 +                             ->  Subquery Scan on s1
 +                                   ->  Nested Loop Semi Join
 +                                         Join Filter: (s1_1.a = s2_2.x)
 +                                         ->  Seq Scan on s1 s1_1
 +                                         ->  Materialize
 +                                               ->  Subquery Scan on s2_2
 +                                                     Filter: (s2_2.y ~~ '%2f%'::text)
 +                                                     ->  Seq Scan on s2 s2_3
 +                                                           Filter: ((x % 2) = 0)
 +(18 rows)
  
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  ALTER POLICY p2 ON s2 USING (x in (select a from s1 where b like '%d2%'));
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM s1 WHERE f_leak(b);     -- fail (infinite recursion via view)
  ERROR:  infinite recursion detected in policy for relation "s1"
- -- prepared statement with rls_regress_user0 privilege
+ -- prepared statement with regress_rls_alice privilege
  PREPARE p1(int) AS SELECT * FROM t1 WHERE a <= $1;
  EXECUTE p1(2);
   a |  b  
@@@ -928,12 -952,23 +944,12 @@@ EXPLAIN (COSTS OFF) EXECUTE p1(2)
  RESET SESSION AUTHORIZATION;
  SET row_security TO OFF;
  SELECT * FROM t1 WHERE f_leak(b);
 -NOTICE:  f_leak => aba
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => ccc
 -NOTICE:  f_leak => dad
 -NOTICE:  f_leak => abc
 -NOTICE:  f_leak => bcd
 -NOTICE:  f_leak => cde
 -NOTICE:  f_leak => def
 -NOTICE:  f_leak => xxx
 -NOTICE:  f_leak => yyy
 -NOTICE:  f_leak => zzz
   a |  b  
  ---+-----
-  1 | aaa
+  1 | aba
   2 | bbb
   3 | ccc
-  4 | ddd
+  4 | dad
   1 | abc
   2 | bcd
   3 | cde
@@@ -995,22 -1024,19 +1011,22 @@@ EXECUTE p2(2)
  (3 rows)
  
  EXPLAIN (COSTS OFF) EXECUTE p2(2);
 -       QUERY PLAN        
 --------------------------
 +                        QUERY PLAN                         
 +-----------------------------------------------------------
   Append
 -   ->  Seq Scan on t1
 -         Filter: (a = 2)
 -   ->  Seq Scan on t2
 -         Filter: (a = 2)
 -   ->  Seq Scan on t3
 -         Filter: (a = 2)
 -(7 rows)
 +   ->  Remote Subquery Scan on all (datanode_1)
 +         ->  Seq Scan on t1
 +               Filter: (a = 2)
 +   ->  Remote Subquery Scan on all (datanode_1)
 +         ->  Seq Scan on t2
 +               Filter: (a = 2)
 +   ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +         ->  Seq Scan on t3
 +               Filter: (a = 2)
 +(10 rows)
  
  -- also, case when privilege switch from superuser
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SET row_security TO ON;
  EXECUTE p2(2);
   a |  b  
@@@ -1038,48 -1061,55 +1054,48 @@@ EXPLAIN (COSTS OFF) EXECUTE p2(2)
  --
  -- UPDATE / DELETE and Row-level security
  --
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  EXPLAIN (COSTS OFF) UPDATE t1 SET b = b || b WHERE f_leak(b);
 -                QUERY PLAN                 
 --------------------------------------------
 - Update on t1 t1_3
 -   Update on t1 t1_3
 -   Update on t2 t1
 -   Update on t3 t1
 -   ->  Subquery Scan on t1
 -         Filter: f_leak(t1.b)
 -         ->  LockRows
 -               ->  Seq Scan on t1 t1_4
 -                     Filter: ((a % 2) = 0)
 -   ->  Subquery Scan on t1_1
 -         Filter: f_leak(t1_1.b)
 -         ->  LockRows
 -               ->  Seq Scan on t2
 -                     Filter: ((a % 2) = 0)
 -   ->  Subquery Scan on t1_2
 -         Filter: f_leak(t1_2.b)
 -         ->  LockRows
 -               ->  Seq Scan on t3
 -                     Filter: ((a % 2) = 0)
 -(19 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Update on t1 t1_3
 +         Update on t1 t1_3
 +         Update on t2 t1
 +         Update on t3 t1
 +         ->  Subquery Scan on t1
 +               Filter: f_leak(t1.b)
 +               ->  LockRows
 +                     ->  Seq Scan on t1 t1_4
 +                           Filter: ((a % 2) = 0)
 +         ->  Subquery Scan on t1_1
 +               Filter: f_leak(t1_1.b)
 +               ->  LockRows
 +                     ->  Seq Scan on t2
 +                           Filter: ((a % 2) = 0)
 +         ->  Subquery Scan on t1_2
 +               Filter: f_leak(t1_2.b)
 +               ->  LockRows
 +                     ->  Seq Scan on t3
 +                           Filter: ((a % 2) = 0)
 +(20 rows)
  
  UPDATE t1 SET b = b || b WHERE f_leak(b);
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => dad
 -NOTICE:  f_leak => bcd
 -NOTICE:  f_leak => def
 -NOTICE:  f_leak => yyy
  EXPLAIN (COSTS OFF) UPDATE only t1 SET b = b || '_updt' WHERE f_leak(b);
 -                QUERY PLAN                 
 --------------------------------------------
 - Update on t1 t1_1
 -   ->  Subquery Scan on t1
 -         Filter: f_leak(t1.b)
 -         ->  LockRows
 -               ->  Seq Scan on t1 t1_2
 -                     Filter: ((a % 2) = 0)
 -(6 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Update on t1 t1_1
 +         ->  Subquery Scan on t1
 +               Filter: f_leak(t1.b)
 +               ->  LockRows
 +                     ->  Seq Scan on t1 t1_2
 +                           Filter: ((a % 2) = 0)
 +(7 rows)
  
  UPDATE only t1 SET b = b || '_updt' WHERE f_leak(b);
 -NOTICE:  f_leak => bbbbbb
 -NOTICE:  f_leak => daddad
  -- returning clause with system column
  UPDATE only t1 SET b = b WHERE f_leak(b) RETURNING oid, *, t1;
 -NOTICE:  f_leak => bbbbbb_updt
 -NOTICE:  f_leak => daddad_updt
   oid | a |      b      |       t1        
  -----+---+-------------+-----------------
   102 | 2 | bbbbbb_updt | (2,bbbbbb_updt)
@@@ -1170,46 -1358,46 +1186,46 @@@ SELECT * FROM t1 ORDER BY a,b
   4 | defdef
  (11 rows)
  
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SET row_security TO ON;
  EXPLAIN (COSTS OFF) DELETE FROM only t1 WHERE f_leak(b);
 -                QUERY PLAN                 
 --------------------------------------------
 - Delete on t1 t1_1
 -   ->  Subquery Scan on t1
 -         Filter: f_leak(t1.b)
 -         ->  LockRows
 -               ->  Seq Scan on t1 t1_2
 -                     Filter: ((a % 2) = 0)
 -(6 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Delete on t1 t1_1
 +         ->  Subquery Scan on t1
 +               Filter: f_leak(t1.b)
 +               ->  LockRows
 +                     ->  Seq Scan on t1 t1_2
 +                           Filter: ((a % 2) = 0)
 +(7 rows)
  
  EXPLAIN (COSTS OFF) DELETE FROM t1 WHERE f_leak(b);
 -                QUERY PLAN                 
 --------------------------------------------
 - Delete on t1 t1_3
 -   Delete on t1 t1_3
 -   Delete on t2 t1
 -   Delete on t3 t1
 -   ->  Subquery Scan on t1
 -         Filter: f_leak(t1.b)
 -         ->  LockRows
 -               ->  Seq Scan on t1 t1_4
 -                     Filter: ((a % 2) = 0)
 -   ->  Subquery Scan on t1_1
 -         Filter: f_leak(t1_1.b)
 -         ->  LockRows
 -               ->  Seq Scan on t2
 -                     Filter: ((a % 2) = 0)
 -   ->  Subquery Scan on t1_2
 -         Filter: f_leak(t1_2.b)
 -         ->  LockRows
 -               ->  Seq Scan on t3
 -                     Filter: ((a % 2) = 0)
 -(19 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Delete on t1 t1_3
 +         Delete on t1 t1_3
 +         Delete on t2 t1
 +         Delete on t3 t1
 +         ->  Subquery Scan on t1
 +               Filter: f_leak(t1.b)
 +               ->  LockRows
 +                     ->  Seq Scan on t1 t1_4
 +                           Filter: ((a % 2) = 0)
 +         ->  Subquery Scan on t1_1
 +               Filter: f_leak(t1_1.b)
 +               ->  LockRows
 +                     ->  Seq Scan on t2
 +                           Filter: ((a % 2) = 0)
 +         ->  Subquery Scan on t1_2
 +               Filter: f_leak(t1_2.b)
 +               ->  LockRows
 +                     ->  Seq Scan on t3
 +                           Filter: ((a % 2) = 0)
 +(20 rows)
  
  DELETE FROM only t1 WHERE f_leak(b) RETURNING oid, *, t1;
 -NOTICE:  f_leak => bbbbbb_updt
 -NOTICE:  f_leak => daddad_updt
   oid | a |      b      |       t1        
  -----+---+-------------+-----------------
   102 | 2 | bbbbbb_updt | (2,bbbbbb_updt)
@@@ -1232,22 -1423,26 +1248,22 @@@ CREATE TABLE b1 (a int, b text)
  INSERT INTO b1 (SELECT x, md5(x::text) FROM generate_series(-10,10) x);
  CREATE POLICY p1 ON b1 USING (a % 2 = 0);
  ALTER TABLE b1 ENABLE ROW LEVEL SECURITY;
- GRANT ALL ON b1 TO rls_regress_user1;
- SET SESSION AUTHORIZATION rls_regress_user1;
+ GRANT ALL ON b1 TO regress_rls_bob;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  CREATE VIEW bv1 WITH (security_barrier) AS SELECT * FROM b1 WHERE a > 0 WITH CHECK OPTION;
- GRANT ALL ON bv1 TO rls_regress_user2;
- SET SESSION AUTHORIZATION rls_regress_user2;
+ GRANT ALL ON bv1 TO regress_rls_carol;
+ SET SESSION AUTHORIZATION regress_rls_carol;
  EXPLAIN (COSTS OFF) SELECT * FROM bv1 WHERE f_leak(b);
 -                 QUERY PLAN                  
 ----------------------------------------------
 - Subquery Scan on bv1
 -   Filter: f_leak(bv1.b)
 -   ->  Seq Scan on b1
 -         Filter: ((a > 0) AND ((a % 2) = 0))
 -(4 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Subquery Scan on bv1
 +         Filter: f_leak(bv1.b)
 +         ->  Seq Scan on b1
 +               Filter: ((a > 0) AND ((a % 2) = 0))
 +(5 rows)
  
  SELECT * FROM bv1 WHERE f_leak(b);
 -NOTICE:  f_leak => c81e728d9d4c2f636f067f89cc14862c
 -NOTICE:  f_leak => a87ff679a2f3e71d9181a67b7542122c
 -NOTICE:  f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
 -NOTICE:  f_leak => c9f0f895fb98ab9159f51fd0297e236d
 -NOTICE:  f_leak => d3d9446802a44259755d38e6d163e820
   a  |                b                 
  ----+----------------------------------
    2 | c81e728d9d4c2f636f067f89cc14862c
  (5 rows)
  
  INSERT INTO bv1 VALUES (-1, 'xxx'); -- should fail view WCO
- ERROR:  new row violates row level security policy for "b1"
+ ERROR:  new row violates row-level security policy for table "b1"
  INSERT INTO bv1 VALUES (11, 'xxx'); -- should fail RLS check
- ERROR:  new row violates row level security policy for "b1"
+ ERROR:  new row violates row-level security policy for table "b1"
  INSERT INTO bv1 VALUES (12, 'xxx'); -- ok
  EXPLAIN (COSTS OFF) UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b);
 -                                QUERY PLAN                                 
 ----------------------------------------------------------------------------
 - Update on b1 b1_1
 -   ->  Subquery Scan on b1
 -         Filter: f_leak(b1.b)
 -         ->  Subquery Scan on b1_2
 -               ->  LockRows
 -                     ->  Seq Scan on b1 b1_3
 -                           Filter: ((a > 0) AND (a = 4) AND ((a % 2) = 0))
 -(7 rows)
 +                                   QUERY PLAN                                    
 +---------------------------------------------------------------------------------
 + Remote Subquery Scan on all (datanode_2)
 +   ->  Update on b1 b1_1
 +         ->  Subquery Scan on b1
 +               Filter: f_leak(b1.b)
 +               ->  Subquery Scan on b1_2
 +                     ->  LockRows
 +                           ->  Seq Scan on b1 b1_3
 +                                 Filter: ((a > 0) AND (a = 4) AND ((a % 2) = 0))
 +(8 rows)
  
  UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b);
 -NOTICE:  f_leak => a87ff679a2f3e71d9181a67b7542122c
  EXPLAIN (COSTS OFF) DELETE FROM bv1 WHERE a = 6 AND f_leak(b);
 -                                QUERY PLAN                                 
 ----------------------------------------------------------------------------
 - Delete on b1 b1_1
 -   ->  Subquery Scan on b1
 -         Filter: f_leak(b1.b)
 -         ->  Subquery Scan on b1_2
 -               ->  LockRows
 -                     ->  Seq Scan on b1 b1_3
 -                           Filter: ((a > 0) AND (a = 6) AND ((a % 2) = 0))
 -(7 rows)
 +                                   QUERY PLAN                                    
 +---------------------------------------------------------------------------------
 + Remote Subquery Scan on all (datanode_1)
 +   ->  Delete on b1 b1_1
 +         ->  Subquery Scan on b1
 +               Filter: f_leak(b1.b)
 +               ->  Subquery Scan on b1_2
 +                     ->  LockRows
 +                           ->  Seq Scan on b1 b1_3
 +                                 Filter: ((a > 0) AND (a = 6) AND ((a % 2) = 0))
 +(8 rows)
  
  DELETE FROM bv1 WHERE a = 6 AND f_leak(b);
 -NOTICE:  f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
 -SET SESSION AUTHORIZATION regress_rls_alice;
 +SET SESSION AUTHORIZATION rls_regress_user0;
  SELECT * FROM b1;
    a  |                b                 
  -----+----------------------------------
@@@ -1452,20 -1647,23 +1468,21 @@@ ERROR:  new row violates row-level secu
  --
  -- ROLE/GROUP
  --
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  CREATE TABLE z1 (a int, b text);
- GRANT SELECT ON z1 TO rls_regress_group1, rls_regress_group2,
-     rls_regress_user1, rls_regress_user2;
+ CREATE TABLE z2 (a int, b text);
+ GRANT SELECT ON z1,z2 TO regress_rls_group1, regress_rls_group2,
+     regress_rls_bob, regress_rls_carol;
  INSERT INTO z1 VALUES
-     (1, 'aaa'),
+     (1, 'aba'),
      (2, 'bbb'),
      (3, 'ccc'),
-     (4, 'ddd');
- CREATE POLICY p1 ON z1 TO rls_regress_group1 USING (a % 2 = 0);
- CREATE POLICY p2 ON z1 TO rls_regress_group2 USING (a % 2 = 1);
+     (4, 'dad');
+ CREATE POLICY p1 ON z1 TO regress_rls_group1 USING (a % 2 = 0);
+ CREATE POLICY p2 ON z1 TO regress_rls_group2 USING (a % 2 = 1);
  ALTER TABLE z1 ENABLE ROW LEVEL SECURITY;
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM z1 WHERE f_leak(b);
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => dad
   a |  b  
  ---+-----
   2 | bbb
  (2 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b);
 -          QUERY PLAN           
 --------------------------------
 - Subquery Scan on z1
 -   Filter: f_leak(z1.b)
 -   ->  Seq Scan on z1 z1_1
 -         Filter: ((a % 2) = 0)
 -(4 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Subquery Scan on z1
 +         Filter: f_leak(z1.b)
 +         ->  Seq Scan on z1 z1_1
 +               Filter: ((a % 2) = 0)
 +(5 rows)
  
- SET ROLE rls_regress_group1;
+ PREPARE plancache_test AS SELECT * FROM z1 WHERE f_leak(b);
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test;
+           QUERY PLAN           
+ -------------------------------
+  Subquery Scan on z1
+    Filter: f_leak(z1.b)
+    ->  Seq Scan on z1 z1_1
+          Filter: ((a % 2) = 0)
+ (4 rows)
+ PREPARE plancache_test2 AS WITH q AS (SELECT * FROM z1 WHERE f_leak(b)) SELECT * FROM q,z2;
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test2;
+               QUERY PLAN               
+ ---------------------------------------
+  Nested Loop
+    CTE q
+      ->  Subquery Scan on z1
+            Filter: f_leak(z1.b)
+            ->  Seq Scan on z1 z1_1
+                  Filter: ((a % 2) = 0)
+    ->  CTE Scan on q
+    ->  Materialize
+          ->  Seq Scan on z2
+ (9 rows)
+ PREPARE plancache_test3 AS WITH q AS (SELECT * FROM z2) SELECT * FROM q,z1 WHERE f_leak(z1.b);
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test3;
+                 QUERY PLAN                 
+ -------------------------------------------
+  Nested Loop
+    CTE q
+      ->  Seq Scan on z2
+    ->  CTE Scan on q
+    ->  Materialize
+          ->  Subquery Scan on z1
+                Filter: f_leak(z1.b)
+                ->  Seq Scan on z1 z1_1
+                      Filter: ((a % 2) = 0)
+ (9 rows)
+ SET ROLE regress_rls_group1;
  SELECT * FROM z1 WHERE f_leak(b);
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => dad
   a |  b  
  ---+-----
   2 | bbb
  (2 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b);
 -          QUERY PLAN           
 --------------------------------
 - Subquery Scan on z1
 -   Filter: f_leak(z1.b)
 -   ->  Seq Scan on z1 z1_1
 -         Filter: ((a % 2) = 0)
 -(4 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Subquery Scan on z1
 +         Filter: f_leak(z1.b)
 +         ->  Seq Scan on z1 z1_1
 +               Filter: ((a % 2) = 0)
 +(5 rows)
  
- SET SESSION AUTHORIZATION rls_regress_user2;
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test;
+           QUERY PLAN           
+ -------------------------------
+  Subquery Scan on z1
+    Filter: f_leak(z1.b)
+    ->  Seq Scan on z1 z1_1
+          Filter: ((a % 2) = 0)
+ (4 rows)
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test2;
+               QUERY PLAN               
+ ---------------------------------------
+  Nested Loop
+    CTE q
+      ->  Subquery Scan on z1
+            Filter: f_leak(z1.b)
+            ->  Seq Scan on z1 z1_1
+                  Filter: ((a % 2) = 0)
+    ->  CTE Scan on q
+    ->  Materialize
+          ->  Seq Scan on z2
+ (9 rows)
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test3;
+                 QUERY PLAN                 
+ -------------------------------------------
+  Nested Loop
+    CTE q
+      ->  Seq Scan on z2
+    ->  CTE Scan on q
+    ->  Materialize
+          ->  Subquery Scan on z1
+                Filter: f_leak(z1.b)
+                ->  Seq Scan on z1 z1_1
+                      Filter: ((a % 2) = 0)
+ (9 rows)
+ SET SESSION AUTHORIZATION regress_rls_carol;
  SELECT * FROM z1 WHERE f_leak(b);
 -NOTICE:  f_leak => aba
 -NOTICE:  f_leak => ccc
   a |  b  
  ---+-----
-  1 | aaa
+  1 | aba
   3 | ccc
  (2 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b);
 -          QUERY PLAN           
 --------------------------------
 - Subquery Scan on z1
 -   Filter: f_leak(z1.b)
 -   ->  Seq Scan on z1 z1_1
 -         Filter: ((a % 2) = 1)
 -(4 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Subquery Scan on z1
 +         Filter: f_leak(z1.b)
 +         ->  Seq Scan on z1 z1_1
 +               Filter: ((a % 2) = 1)
 +(5 rows)
  
- SET ROLE rls_regress_group2;
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test;
+           QUERY PLAN           
+ -------------------------------
+  Subquery Scan on z1
+    Filter: f_leak(z1.b)
+    ->  Seq Scan on z1 z1_1
+          Filter: ((a % 2) = 1)
+ (4 rows)
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test2;
+               QUERY PLAN               
+ ---------------------------------------
+  Nested Loop
+    CTE q
+      ->  Subquery Scan on z1
+            Filter: f_leak(z1.b)
+            ->  Seq Scan on z1 z1_1
+                  Filter: ((a % 2) = 1)
+    ->  CTE Scan on q
+    ->  Materialize
+          ->  Seq Scan on z2
+ (9 rows)
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test3;
+                 QUERY PLAN                 
+ -------------------------------------------
+  Nested Loop
+    CTE q
+      ->  Seq Scan on z2
+    ->  CTE Scan on q
+    ->  Materialize
+          ->  Subquery Scan on z1
+                Filter: f_leak(z1.b)
+                ->  Seq Scan on z1 z1_1
+                      Filter: ((a % 2) = 1)
+ (9 rows)
+ SET ROLE regress_rls_group2;
  SELECT * FROM z1 WHERE f_leak(b);
 -NOTICE:  f_leak => aba
 -NOTICE:  f_leak => ccc
   a |  b  
  ---+-----
-  1 | aaa
+  1 | aba
   3 | ccc
  (2 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b);
 -          QUERY PLAN           
 --------------------------------
 - Subquery Scan on z1
 -   Filter: f_leak(z1.b)
 -   ->  Seq Scan on z1 z1_1
 -         Filter: ((a % 2) = 1)
 -(4 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Subquery Scan on z1
 +         Filter: f_leak(z1.b)
 +         ->  Seq Scan on z1 z1_1
 +               Filter: ((a % 2) = 1)
 +(5 rows)
  
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test;
+           QUERY PLAN           
+ -------------------------------
+  Subquery Scan on z1
+    Filter: f_leak(z1.b)
+    ->  Seq Scan on z1 z1_1
+          Filter: ((a % 2) = 1)
+ (4 rows)
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test2;
+               QUERY PLAN               
+ ---------------------------------------
+  Nested Loop
+    CTE q
+      ->  Subquery Scan on z1
+            Filter: f_leak(z1.b)
+            ->  Seq Scan on z1 z1_1
+                  Filter: ((a % 2) = 1)
+    ->  CTE Scan on q
+    ->  Materialize
+          ->  Seq Scan on z2
+ (9 rows)
+ EXPLAIN (COSTS OFF) EXECUTE plancache_test3;
+                 QUERY PLAN                 
+ -------------------------------------------
+  Nested Loop
+    CTE q
+      ->  Seq Scan on z2
+    ->  CTE Scan on q
+    ->  Materialize
+          ->  Subquery Scan on z1
+                Filter: f_leak(z1.b)
+                ->  Seq Scan on z1 z1_1
+                      Filter: ((a % 2) = 1)
+ (9 rows)
  --
  -- Views should follow policy for view owner.
  --
  -- View and Table owner are the same.
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b);
- GRANT SELECT ON rls_view TO rls_regress_user1;
+ GRANT SELECT ON rls_view TO regress_rls_bob;
  -- Query as role that is not owner of view or table.  Should return all records.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM rls_view;
 -NOTICE:  f_leak => aba
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => ccc
 -NOTICE:  f_leak => dad
   a |  b  
  ---+-----
-  1 | aaa
+  1 | aba
   2 | bbb
   3 | ccc
-  4 | ddd
+  4 | dad
  (4 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
 -     QUERY PLAN      
 ----------------------
 - Seq Scan on z1
 -   Filter: f_leak(b)
 -(2 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Seq Scan on z1
 +         Filter: f_leak(b)
 +(3 rows)
  
  -- Query as view/table owner.  Should return all records.
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  SELECT * FROM rls_view;
 -NOTICE:  f_leak => aba
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => ccc
 -NOTICE:  f_leak => dad
   a |  b  
  ---+-----
-  1 | aaa
+  1 | aba
   2 | bbb
   3 | ccc
-  4 | ddd
+  4 | dad
  (4 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
  
  DROP VIEW rls_view;
  -- View and Table owners are different.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b);
- GRANT SELECT ON rls_view TO rls_regress_user0;
+ GRANT SELECT ON rls_view TO regress_rls_alice;
  -- Query as role that is not owner of view but is owner of table.
  -- Should return records based on view owner policies.
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  SELECT * FROM rls_view;
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => dad
   a |  b  
  ---+-----
   2 | bbb
@@@ -1608,8 -1966,10 +1776,8 @@@ EXPLAIN (COSTS OFF) SELECT * FROM rls_v
  
  -- Query as role that is not owner of table but is owner of view.
  -- Should return records based on view owner policies.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM rls_view;
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => dad
   a |  b  
  ---+-----
   2 | bbb
  (2 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
 -          QUERY PLAN           
 --------------------------------
 - Subquery Scan on z1
 -   Filter: f_leak(z1.b)
 -   ->  Seq Scan on z1 z1_1
 -         Filter: ((a % 2) = 0)
 -(4 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Subquery Scan on z1
 +         Filter: f_leak(z1.b)
 +         ->  Seq Scan on z1 z1_1
 +               Filter: ((a % 2) = 0)
 +(5 rows)
  
  -- Query as role that is not the owner of the table or view without permissions.
- SET SESSION AUTHORIZATION rls_regress_user2;
+ SET SESSION AUTHORIZATION regress_rls_carol;
  SELECT * FROM rls_view; --fail - permission denied.
  ERROR:  permission denied for relation rls_view
  EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied.
  ERROR:  permission denied for relation rls_view
  -- Query as role that is not the owner of the table or view with permissions.
- SET SESSION AUTHORIZATION rls_regress_user1;
- GRANT SELECT ON rls_view TO rls_regress_user2;
+ SET SESSION AUTHORIZATION regress_rls_bob;
+ GRANT SELECT ON rls_view TO regress_rls_carol;
  SELECT * FROM rls_view;
 -NOTICE:  f_leak => bbb
 -NOTICE:  f_leak => dad
   a |  b  
  ---+-----
   2 | bbb
  (2 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
 -          QUERY PLAN           
 --------------------------------
 - Subquery Scan on z1
 -   Filter: f_leak(z1.b)
 -   ->  Seq Scan on z1 z1_1
 -         Filter: ((a % 2) = 0)
 -(4 rows)
 +                     QUERY PLAN                      
 +-----------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Subquery Scan on z1
 +         Filter: f_leak(z1.b)
 +         ->  Seq Scan on z1 z1_1
 +               Filter: ((a % 2) = 0)
 +(5 rows)
  
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  DROP VIEW rls_view;
  --
  -- Command specific
@@@ -1675,39 -2035,57 +1843,39 @@@ CREATE POLICY p2 ON x1 FOR INSERT WITH 
  CREATE POLICY p3 ON x1 FOR UPDATE USING (a % 2 = 0);
  CREATE POLICY p4 ON x1 FOR DELETE USING (a < 8);
  ALTER TABLE x1 ENABLE ROW LEVEL SECURITY;
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC;
 -NOTICE:  f_leak => abc
 -NOTICE:  f_leak => bcd
 -NOTICE:  f_leak => def
 -NOTICE:  f_leak => efg
 -NOTICE:  f_leak => fgh
 -NOTICE:  f_leak => fgh
   a |  b  |         c         
  ---+-----+-------------------
-  1 | abc | rls_regress_user1
-  2 | bcd | rls_regress_user1
-  4 | def | rls_regress_user2
-  5 | efg | rls_regress_user1
-  6 | fgh | rls_regress_user1
-  8 | fgh | rls_regress_user2
+  1 | abc | regress_rls_bob
+  2 | bcd | regress_rls_bob
+  4 | def | regress_rls_carol
+  5 | efg | regress_rls_bob
+  6 | fgh | regress_rls_bob
+  8 | fgh | regress_rls_carol
  (6 rows)
  
  UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *;
 -NOTICE:  f_leak => abc
 -NOTICE:  f_leak => bcd
 -NOTICE:  f_leak => def
 -NOTICE:  f_leak => efg
 -NOTICE:  f_leak => fgh
 -NOTICE:  f_leak => fgh
 - a |    b     |         c         
 ----+----------+-------------------
 - 1 | abc_updt | regress_rls_bob
 - 2 | bcd_updt | regress_rls_bob
 - 4 | def_updt | regress_rls_carol
 - 5 | efg_updt | regress_rls_bob
 - 6 | fgh_updt | regress_rls_bob
 - 8 | fgh_updt | regress_rls_carol
 + a |    b     |         c         
 +---+----------+-------------------
 + 1 | abc_updt | rls_regress_user1
 + 2 | bcd_updt | rls_regress_user1
 + 5 | efg_updt | rls_regress_user1
 + 6 | fgh_updt | rls_regress_user1
 + 8 | fgh_updt | rls_regress_user2
 + 4 | def_updt | rls_regress_user2
  (6 rows)
  
- SET SESSION AUTHORIZATION rls_regress_user2;
+ SET SESSION AUTHORIZATION regress_rls_carol;
  SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC;
 -NOTICE:  f_leak => cde
 -NOTICE:  f_leak => fgh
 -NOTICE:  f_leak => bcd_updt
 -NOTICE:  f_leak => def_updt
 -NOTICE:  f_leak => fgh_updt
 -NOTICE:  f_leak => fgh_updt
   a |    b     |         c         
  ---+----------+-------------------
-  2 | bcd_updt | rls_regress_user1
-  3 | cde      | rls_regress_user2
-  4 | def_updt | rls_regress_user2
-  6 | fgh_updt | rls_regress_user1
-  7 | fgh      | rls_regress_user2
-  8 | fgh_updt | rls_regress_user2
+  2 | bcd_updt | regress_rls_bob
+  3 | cde      | regress_rls_carol
+  4 | def_updt | regress_rls_carol
+  6 | fgh_updt | regress_rls_bob
+  7 | fgh      | regress_rls_carol
+  8 | fgh_updt | regress_rls_carol
  (6 rows)
  
  UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *;
@@@ -1784,11 -2172,28 +1952,11 @@@ SET SESSION AUTHORIZATION regress_rls_a
  INSERT INTO y2 (SELECT x, md5(x::text) FROM generate_series(0,20) x);
  CREATE POLICY p2 ON y2 USING (a % 3 = 0);
  CREATE POLICY p3 ON y2 USING (a % 4 = 0);
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM y2 WHERE f_leak(b);
 -NOTICE:  f_leak => cfcd208495d565ef66e7dff9f98764da
 -NOTICE:  f_leak => c81e728d9d4c2f636f067f89cc14862c
 -NOTICE:  f_leak => eccbc87e4b5ce2fe28308fd9f2a7baf3
 -NOTICE:  f_leak => a87ff679a2f3e71d9181a67b7542122c
 -NOTICE:  f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
 -NOTICE:  f_leak => c9f0f895fb98ab9159f51fd0297e236d
 -NOTICE:  f_leak => 45c48cce2e2d7fbdea1afc51c7c6ad26
 -NOTICE:  f_leak => d3d9446802a44259755d38e6d163e820
 -NOTICE:  f_leak => c20ad4d76fe97759aa27a0c99bff6710
 -NOTICE:  f_leak => aab3238922bcc25a6f606eb525ffdc56
 -NOTICE:  f_leak => 9bf31c7ff062936a96d3c8bd1f8f2ff3
 -NOTICE:  f_leak => c74d97b01eae257e44aa9d5bade97baf
 -NOTICE:  f_leak => 6f4922f45568161a8cdf4ad2299f6d23
 -NOTICE:  f_leak => 98f13708210194c475687be6106a3b84
   a  |                b                 
  ----+----------------------------------
 -  0 | cfcd208495d565ef66e7dff9f98764da
    2 | c81e728d9d4c2f636f067f89cc14862c
 -  3 | eccbc87e4b5ce2fe28308fd9f2a7baf3
 -  4 | a87ff679a2f3e71d9181a67b7542122c
    6 | 1679091c5a880faf6fb5e6087eb1b2dc
    8 | c9f0f895fb98ab9159f51fd0297e236d
    9 | 45c48cce2e2d7fbdea1afc51c7c6ad26
@@@ -1837,15 -2259,14 +2005,15 @@@ SELECT * FROM y2 WHERE f_leak('abc')
  (14 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak('abc');
 -                                      QUERY PLAN                                       
 ----------------------------------------------------------------------------------------
 - Seq Scan on y2
 -   Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)))
 -(2 rows)
 +                                         QUERY PLAN                                          
 +---------------------------------------------------------------------------------------------
 + Remote Subquery Scan on all (datanode_1,datanode_2)
 +   ->  Seq Scan on y2
 +         Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)))
 +(3 rows)
  
  CREATE TABLE test_qual_pushdown (
-       abc             text
+     abc text
  );
  INSERT INTO test_qual_pushdown VALUES ('abc'),('def');
  SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc);
@@@ -1899,47 -2330,41 +2067,57 @@@ SET client_min_messages TO 'warning'
  DROP TABLE t1 CASCADE;
  RESET client_min_messages;
  CREATE TABLE t1 (a integer);
- GRANT SELECT ON t1 TO rls_regress_user1, rls_regress_user2;
- CREATE POLICY p1 ON t1 TO rls_regress_user1 USING ((a % 2) = 0);
- CREATE POLICY p2 ON t1 TO rls_regress_user2 USING ((a % 4) = 0);
+ GRANT SELECT ON t1 TO regress_rls_bob, regress_rls_carol;
+ CREATE POLICY p1 ON t1 TO regress_rls_bob USING ((a % 2) = 0);
+ CREATE POLICY p2 ON t1 TO regress_rls_carol USING ((a % 4) = 0);
  ALTER TABLE t1 ENABLE ROW LEVEL SECURITY;
 --- Prepare as regress_rls_bob
 -SET ROLE regress_rls_bob;
 +-- Prepare as rls_regress_user1
 +SET ROLE rls_regress_user1;
  PREPARE role_inval AS SELECT * FROM t1;
  -- Check plan
  EXPLAIN (COSTS OFF) EXECUTE role_inval;
 -       QUERY PLAN        
 --------------------------
 - Seq Scan on t1
 -   Filter: ((a % 2) = 0)
 -(2 rows)
 +            QUERY PLAN            
 +----------------------------------
 + Remote Fast Query Execution
 +   Node/s: datanode_1, datanode_2
 +   ->  Seq Scan on t1
 +         Filter: ((a % 2) = 0)
 +(4 rows)
  
 --- Change to regress_rls_carol
 -SET ROLE regress_rls_carol;
 +-- Change to rls_regress_user2
 +SET ROLE rls_regress_user2;
  -- Check plan- should be different
  EXPLAIN (COSTS OFF) EXECUTE role_inval;
 -       QUERY PLAN        
 --------------------------
 - Seq Scan on t1
 -   Filter: ((a % 4) = 0)
 -(2 rows)
 +            QUERY PLAN            
 +----------------------------------
 + Remote Fast Query Execution
 +   Node/s: datanode_1, datanode_2
 +   ->  Seq Scan on t1
 +         Filter: ((a % 4) = 0)
 +(4 rows)
 +
 +-- Change back to rls_regress_user1
 +SET ROLE rls_regress_user1;
 +-- Check plan- should be back to original
 +EXPLAIN (COSTS OFF) EXECUTE role_inval;
 +            QUERY PLAN            
 +----------------------------------
 + Remote Fast Query Execution
 +   Node/s: datanode_1, datanode_2
 +   ->  Seq Scan on t1
 +         Filter: ((a % 2) = 0)
 +(4 rows)
  
+ -- Change back to regress_rls_bob
+ SET ROLE regress_rls_bob;
+ -- Check plan- should be back to original
+ EXPLAIN (COSTS OFF) EXECUTE role_inval;
+        QUERY PLAN        
+ -------------------------
+  Seq Scan on t1
+    Filter: ((a % 2) = 0)
+ (2 rows)
  --
  -- CTE and RLS
  --
@@@ -1948,19 -2373,30 +2126,19 @@@ DROP TABLE t1 CASCADE
  CREATE TABLE t1 (a integer, b text);
  CREATE POLICY p1 ON t1 USING (a % 2 = 0);
  ALTER TABLE t1 ENABLE ROW LEVEL SECURITY;
- GRANT ALL ON t1 TO rls_regress_user1;
+ GRANT ALL ON t1 TO regress_rls_bob;
  INSERT INTO t1 (SELECT x, md5(x::text) FROM generate_series(0,20) x);
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  WITH cte1 AS (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1;
 -NOTICE:  f_leak => cfcd208495d565ef66e7dff9f98764da
 -NOTICE:  f_leak => c81e728d9d4c2f636f067f89cc14862c
 -NOTICE:  f_leak => a87ff679a2f3e71d9181a67b7542122c
 -NOTICE:  f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
 -NOTICE:  f_leak => c9f0f895fb98ab9159f51fd0297e236d
 -NOTICE:  f_leak => d3d9446802a44259755d38e6d163e820
 -NOTICE:  f_leak => c20ad4d76fe97759aa27a0c99bff6710
 -NOTICE:  f_leak => aab3238922bcc25a6f606eb525ffdc56
 -NOTICE:  f_leak => c74d97b01eae257e44aa9d5bade97baf
 -NOTICE:  f_leak => 6f4922f45568161a8cdf4ad2299f6d23
 -NOTICE:  f_leak => 98f13708210194c475687be6106a3b84
   a  |                b                 
  ----+----------------------------------
 -  0 | cfcd208495d565ef66e7dff9f98764da
    2 | c81e728d9d4c2f636f067f89cc14862c
 -  4 | a87ff679a2f3e71d9181a67b7542122c
    6 | 1679091c5a880faf6fb5e6087eb1b2dc
    8 | c9f0f895fb98ab9159f51fd0297e236d
 - 10 | d3d9446802a44259755d38e6d163e820
   12 | c20ad4d76fe97759aa27a0c99bff6710
 +  0 | cfcd208495d565ef66e7dff9f98764da
 +  4 | a87ff679a2f3e71d9181a67b7542122c
 + 10 | d3d9446802a44259755d38e6d163e820
   14 | aab3238922bcc25a6f606eb525ffdc56
   16 | c74d97b01eae257e44aa9d5bade97baf
   18 | 6f4922f45568161a8cdf4ad2299f6d23
@@@ -2121,14 -2573,14 +2299,14 @@@ SELECT id, author, message FROM blog JO
  SELECT id, author, message FROM comment JOIN blog ON id = blog_id;
   id | author |   message   
  ----+--------+-------------
 -  4 | alice  | insane!
    2 | bob    | who did it?
 +  4 | alice  | insane!
  (2 rows)
  
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  CREATE POLICY comment_1 ON comment USING (blog_id < 4);
  ALTER TABLE comment ENABLE ROW LEVEL SECURITY;
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  -- Check RLS JOIN RLS
  SELECT id, author, message FROM blog JOIN comment ON id = blog_id;
   id | author |   message   
@@@ -2176,18 -2628,17 +2354,18 @@@ SELECT * FROM t1
   16 | c74d97b01eae257e44aa9d5bade97baf
   18 | 6f4922f45568161a8cdf4ad2299f6d23
   20 | 98f13708210194c475687be6106a3b84
 - 20 | Success
 -(22 rows)
 +(21 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM t1;
 -   QUERY PLAN   
 -----------------
 - Seq Scan on t1
 -(1 row)
 +            QUERY PLAN            
 +----------------------------------
 + Remote Fast Query Execution
 +   Node/s: datanode_1, datanode_2
 +   ->  Seq Scan on t1
 +(3 rows)
  
  -- Check that default deny does not apply to table owner.
- SET SESSION AUTHORIZATION rls_regress_user0;
+ SET SESSION AUTHORIZATION regress_rls_alice;
  SELECT * FROM t1;
   a  |                b                 
  ----+----------------------------------
   16 | c74d97b01eae257e44aa9d5bade97baf
   18 | 6f4922f45568161a8cdf4ad2299f6d23
   20 | 98f13708210194c475687be6106a3b84
 - 20 | Success
 -(22 rows)
 +(21 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM t1;
 -   QUERY PLAN   
 -----------------
 - Seq Scan on t1
 -(1 row)
 +            QUERY PLAN            
 +----------------------------------
 + Remote Fast Query Execution
 +   Node/s: datanode_1, datanode_2
 +   ->  Seq Scan on t1
 +(3 rows)
  
  -- Check that default deny applies to non-owner/non-superuser when RLS on.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SET row_security TO ON;
  SELECT * FROM t1;
   a | b 
  (0 rows)
  
  EXPLAIN (COSTS OFF) SELECT * FROM t1;
 -        QUERY PLAN        
 ---------------------------
 - Result
 -   One-Time Filter: false
 -(2 rows)
 +            QUERY PLAN            
 +----------------------------------
 + Remote Fast Query Execution
 +   Node/s: datanode_1, datanode_2
 +   ->  Result
 +         One-Time Filter: false
 +(4 rows)
  
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SELECT * FROM t1;
   a | b 
  ---+---
@@@ -2324,16 -2770,21 +2502,21 @@@ COPY (SELECT * FROM copy_t ORDER BY a A
  SET row_security TO ON;
  COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok
  0,cfcd208495d565ef66e7dff9f98764da
+ 1,c4ca4238a0b923820dcc509a6f75849b
  2,c81e728d9d4c2f636f067f89cc14862c
+ 3,eccbc87e4b5ce2fe28308fd9f2a7baf3
  4,a87ff679a2f3e71d9181a67b7542122c
+ 5,e4da3b7fbbce2345d7772b0674a318d5
  6,1679091c5a880faf6fb5e6087eb1b2dc
+ 7,8f14e45fceea167a5a36dedd4bea2543
  8,c9f0f895fb98ab9159f51fd0297e236d
+ 9,45c48cce2e2d7fbdea1afc51c7c6ad26
  10,d3d9446802a44259755d38e6d163e820
  -- Check COPY TO as user without permissions. SET row_security TO OFF;
 -SET SESSION AUTHORIZATION regress_rls_carol;
 +SET SESSION AUTHORIZATION rls_regress_user2;
  SET row_security TO OFF;
- COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - insufficient to bypass rls
- ERROR:  insufficient privilege to bypass row security.
+ COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS
+ ERROR:  query would be affected by row-level security policy for table "copy_t"
  SET row_security TO ON;
  COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - permission denied
  ERROR:  permission denied for relation copy_t
@@@ -2382,24 -2834,20 +2565,24 @@@ COPY copy_t FROM STDIN; --o
  SET row_security TO ON;
  COPY copy_t FROM STDIN; --ok
  -- Check COPY FROM as user with permissions.
- SET SESSION AUTHORIZATION rls_regress_user1;
+ SET SESSION AUTHORIZATION regress_rls_bob;
  SET row_security TO OFF;
- COPY copy_t FROM STDIN; --fail - insufficient privilege to bypass rls.
- ERROR:  insufficient privilege to bypass row security.
+ COPY copy_t FROM STDIN; --fail - would be affected by RLS.
+ ERROR:  query would be affected by row-level security policy for table "copy_t"
  SET row_security TO ON;
  COPY copy_t FROM STDIN; --fail - COPY FROM not supported by RLS.
 -ERROR:  COPY FROM not supported with row-level security
 -HINT:  Use INSERT statements instead.
 --- Check COPY FROM as user with permissions and BYPASSRLS
 -SET SESSION AUTHORIZATION regress_rls_exempt_user;
 -SET row_security TO ON;
 +ERROR:  COPY FROM not supported with row level security.
 +HINT:  Use direct INSERT statements instead.
 +-- Check COPY TO as user with permissions and BYPASSRLS
 +SET SESSION AUTHORIZATION rls_regress_exempt_user;
 +SET row_security TO OFF;
  COPY copy_t FROM STDIN; --ok
 +SET row_security TO ON;
 +COPY copy_t FROM STDIN; --fail - COPY FROM not supported by RLS.
 +ERROR:  COPY FROM not supported with row level security.
 +HINT:  Use direct INSERT statements instead.
  -- Check COPY FROM as user without permissions.
- SET SESSION AUTHORIZATION rls_regress_user2;
+ SET SESSION AUTHORIZATION regress_rls_carol;
  SET row_security TO OFF;
  COPY copy_t FROM STDIN; --fail - permission denied.
  ERROR:  permission denied for relation copy_t
@@@ -2714,17 -3456,17 +3153,17 @@@ DROP USER regress_rls_dob_role2
  RESET SESSION AUTHORIZATION;
  -- Suppress NOTICE messages when doing a cascaded drop.
  SET client_min_messages TO 'warning';
- DROP SCHEMA rls_regress_schema CASCADE;
+ DROP SCHEMA regress_rls_schema CASCADE;
  RESET client_min_messages;
 -DROP USER regress_rls_alice;
 -DROP USER regress_rls_bob;
 -DROP USER regress_rls_carol;
 -DROP USER regress_rls_exempt_user;
 -DROP ROLE regress_rls_group1;
 -DROP ROLE regress_rls_group2;
 +DROP USER rls_regress_user0;
 +DROP USER rls_regress_user1;
 +DROP USER rls_regress_user2;
 +DROP USER rls_regress_exempt_user;
 +DROP ROLE rls_regress_group1;
 +DROP ROLE rls_regress_group2;
  -- Arrange to have a few policies left over, for testing
  -- pg_dump/pg_restore
 -CREATE SCHEMA regress_rls_schema;
 +CREATE SCHEMA rls_regress_schema;
  CREATE TABLE rls_tbl (c1 int);
  ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY;
  CREATE POLICY p1 ON rls_tbl USING (c1 > 5);
Simple merge
Simple merge
Simple merge
index 12e87f06f80f5ee5fb2c3803c514a3814a70942e,727a83543973436293d6f6371374a25b54a66078..49587295a4c9bf2f721a2507c1cbaf2ffb1f9c72
@@@ -19,11 -21,13 +21,12 @@@ SELECT id FROM test_tablesample TABLESA
  SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0);
   id 
  ----
+   3
+   4
+   5
    6
 -  7
    8
-   9
-   7
- (4 rows)
+ (6 rows)
  
  SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (50) REPEATABLE (0);
   id 
@@@ -218,58 -227,58 +227,117 @@@ SELECT count(*) FROM test_tablesample T
       0
  (1 row)
  
+ -- check behavior during rescans, as well as correct handling of min/max pct
+ select * from
+   (values (0),(100)) v(pct),
+   lateral (select count(*) from tenk1 tablesample bernoulli (pct)) ss;
+  pct | count 
+ -----+-------
+    0 |     0
+  100 | 10000
+ (2 rows)
+ select * from
+   (values (0),(100)) v(pct),
+   lateral (select count(*) from tenk1 tablesample system (pct)) ss;
+  pct | count 
+ -----+-------
+    0 |     0
+  100 | 10000
+ (2 rows)
+ explain (costs off)
+ select pct, count(unique1) from
+   (values (0),(100)) v(pct),
+   lateral (select * from tenk1 tablesample bernoulli (pct)) ss
+   group by pct;
+                        QUERY PLAN                       
+ --------------------------------------------------------
+  HashAggregate
+    Group Key: "*VALUES*".column1
+    ->  Nested Loop
+          ->  Values Scan on "*VALUES*"
+          ->  Sample Scan on tenk1
+                Sampling: bernoulli ("*VALUES*".column1)
+ (6 rows)
+ select pct, count(unique1) from
+   (values (0),(100)) v(pct),
+   lateral (select * from tenk1 tablesample bernoulli (pct)) ss
+   group by pct;
+  pct | count 
+ -----+-------
+  100 | 10000
+ (1 row)
+ select pct, count(unique1) from
+   (values (0),(100)) v(pct),
+   lateral (select * from tenk1 tablesample system (pct)) ss
+   group by pct;
+  pct | count 
+ -----+-------
+  100 | 10000
+ (1 row)
++-- check that collations get assigned within the tablesample arguments
++SELECT count(*) FROM test_tablesample TABLESAMPLE bernoulli (('1'::text < '0'::text)::int);
++ count 
++-------
++     0
++(1 row)
++
 +-- check behavior during rescans, as well as correct handling of min/max pct
 +select * from
 +  (values (0),(100)) v(pct),
 +  lateral (select count(*) from tenk1 tablesample bernoulli (pct)) ss;
 + pct | count 
 +-----+-------
 +   0 |     0
 + 100 |     0
 +(2 rows)
 +
 +select * from
 +  (values (0),(100)) v(pct),
 +  lateral (select count(*) from tenk1 tablesample system (pct)) ss;
 + pct | count 
 +-----+-------
 +   0 |     0
 + 100 |     0
 +(2 rows)
 +
 +explain (costs off)
 +select pct, count(unique1) from
 +  (values (0),(100)) v(pct),
 +  lateral (select * from tenk1 tablesample bernoulli (pct)) ss
 +  group by pct;
 +                              QUERY PLAN                               
 +-----------------------------------------------------------------------
 + HashAggregate
 +   Group Key: "*VALUES*".column1
 +   ->  Nested Loop
 +         ->  Values Scan on "*VALUES*"
 +         ->  Materialize
 +               ->  Remote Subquery Scan on all (datanode_1,datanode_2)
 +                     ->  Sample Scan on tenk1
 +                           Sampling: bernoulli ("*VALUES*".column1)
 +(8 rows)
 +
 +select pct, count(unique1) from
 +  (values (0),(100)) v(pct),
 +  lateral (select * from tenk1 tablesample bernoulli (pct)) ss
 +  group by pct;
 + pct | count 
 +-----+-------
 +(0 rows)
 +
 +select pct, count(unique1) from
 +  (values (0),(100)) v(pct),
 +  lateral (select * from tenk1 tablesample system (pct)) ss
 +  group by pct;
 + pct | count 
 +-----+-------
 +(0 rows)
 +
  -- errors
  SELECT id FROM test_tablesample TABLESAMPLE FOOBAR (1);
  ERROR:  tablesample method foobar does not exist
Simple merge
index 3a2a671c2b5c2cf801edfb618ad7c78a9f106d5f,4a2fabddd9f68473c90b3e8263069a0375dcffb6..0801c3084344c992993aa965fc3b60782379cbcf
@@@ -238,27 -246,35 +238,39 @@@ SELECT '' AS "64", d1 FROM TIMESTAMP_TB
      | Wed Dec 31 17:32:01 1997
      | Fri Dec 31 17:32:01 1999
      | Sat Jan 01 17:32:01 2000
 +    | Wed Mar 15 02:14:05 2000
 +    | Wed Mar 15 03:14:04 2000
 +    | Wed Mar 15 08:14:01 2000
 +    | Wed Mar 15 12:14:03 2000
 +    | Wed Mar 15 13:14:02 2000
      | Sun Dec 31 17:32:01 2000
      | Mon Jan 01 17:32:01 2001
 +    | Sat Sep 22 18:19:20 2001
 +    | Sat Feb 16 17:32:01 2097
 +    | infinity
  (65 rows)
  
+ -- Check behavior at the lower boundary of the timestamp range
+ SELECT '4714-11-24 00:00:00 BC'::timestamp;
+           timestamp          
+ -----------------------------
+  Mon Nov 24 00:00:00 4714 BC
+ (1 row)
+ SELECT '4714-11-23 23:59:59 BC'::timestamp;  -- out of range
+ ERROR:  timestamp out of range: "4714-11-23 23:59:59 BC"
+ LINE 1: SELECT '4714-11-23 23:59:59 BC'::timestamp;
+                ^
+ -- The upper boundary differs between integer and float timestamps, so no check
  -- Demonstrate functions and operators
  SELECT '' AS "48", d1 FROM TIMESTAMP_TBL
 -   WHERE d1 > timestamp without time zone '1997-01-02';
 +   WHERE d1 > timestamp without time zone '1997-01-02' ORDER BY d1;
   48 |             d1             
  ----+----------------------------
 -    | infinity
 +    | Thu Jan 02 03:04:05 1997
 +    | Mon Feb 10 17:32:00 1997
      | Mon Feb 10 17:32:01 1997
      | Mon Feb 10 17:32:01 1997
 -    | Mon Feb 10 17:32:02 1997
 -    | Mon Feb 10 17:32:01.4 1997
 -    | Mon Feb 10 17:32:01.5 1997
 -    | Mon Feb 10 17:32:01.6 1997
 -    | Thu Jan 02 03:04:05 1997
      | Mon Feb 10 17:32:01 1997
      | Mon Feb 10 17:32:01 1997
      | Mon Feb 10 17:32:01 1997
@@@ -996,11 -1004,23 +1008,18 @@@ SELECT '' AS to_char_3, to_char(d1, 'Y,
             | 1,997 1997 997 97 7 20 1 02 07 045 14 6 2450494
             | 1,997 1997 997 97 7 20 1 02 07 046 15 7 2450495
             | 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
+            | 0,097 0097 097 97 7 -01 1 02 07 047 16 3 1686042
+            | 0,097 0097 097 97 7 01 1 02 07 047 16 7 1756536
+            | 0,597 0597 597 97 7 06 1 02 07 047 16 5 1939157
+            | 1,097 1097 097 97 7 11 1 02 07 047 16 3 2121778
+            | 1,697 1697 697 97 7 17 1 02 07 047 16 7 2340924
+            | 1,797 1797 797 97 7 18 1 02 07 047 16 5 2377448
+            | 1,897 1897 897 97 7 19 1 02 07 047 16 3 2413972
             | 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
 -           | 2,097 2097 097 97 7 21 1 02 07 047 16 7 2487021
 -           | 1,996 1996 996 96 6 20 1 02 09 059 28 4 2450142
 -           | 1,996 1996 996 96 6 20 1 02 09 060 29 5 2450143
 -           | 1,996 1996 996 96 6 20 1 03 09 061 01 6 2450144
 -           | 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448
 -           | 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449
 -           | 1,997 1997 997 97 7 20 1 01 01 001 01 4 2450450
             | 1,997 1997 997 97 7 20 1 02 09 059 28 6 2450508
             | 1,997 1997 997 97 7 20 1 03 09 060 01 7 2450509
 +           | 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
 +           | 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
             | 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813
             | 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814
             | 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544
index 26b4a837c840395ea12571ebc9f2da6e8ce3aa34,67f26db2048e8c25e94ddc15f420e91a04f7105f..7163e37d8d2d156eba149126bcd7860ce22710c1
@@@ -310,31 -318,47 +310,55 @@@ SELECT '' AS "64", d1 FROM TIMESTAMPTZ_
      | Wed Dec 31 17:32:01 1997 PST
      | Fri Dec 31 17:32:01 1999 PST
      | Sat Jan 01 17:32:01 2000 PST
 +    | Wed Mar 15 01:14:05 2000 PST
 +    | Wed Mar 15 02:14:03 2000 PST
 +    | Wed Mar 15 03:14:04 2000 PST
 +    | Wed Mar 15 04:14:02 2000 PST
 +    | Wed Mar 15 08:14:01 2000 PST
      | Sun Dec 31 17:32:01 2000 PST
      | Mon Jan 01 17:32:01 2001 PST
 +    | Sat Sep 22 18:19:20 2001 PDT
 +    | Sat Feb 16 17:32:01 2097 PST
 +    | infinity
  (66 rows)
  
+ -- Check behavior at the lower boundary of the timestamp range
+ SELECT '4714-11-24 00:00:00+00 BC'::timestamptz;
+            timestamptz           
+ ---------------------------------
+  Sun Nov 23 16:00:00 4714 PST BC
+ (1 row)
+ SELECT '4714-11-23 16:00:00-08 BC'::timestamptz;
+            timestamptz           
+ ---------------------------------
+  Sun Nov 23 16:00:00 4714 PST BC
+ (1 row)
+ SELECT 'Sun Nov 23 16:00:00 4714 PST BC'::timestamptz;
+            timestamptz           
+ ---------------------------------
+  Sun Nov 23 16:00:00 4714 PST BC
+ (1 row)
+ SELECT '4714-11-23 23:59:59+00 BC'::timestamptz;  -- out of range
+ ERROR:  timestamp out of range: "4714-11-23 23:59:59+00 BC"
+ LINE 1: SELECT '4714-11-23 23:59:59+00 BC'::timestamptz;
+                ^
+ -- The upper boundary differs between integer and float timestamps, so no check
  -- Demonstrate functions and operators
  SELECT '' AS "48", d1 FROM TIMESTAMPTZ_TBL
 -   WHERE d1 > timestamp with time zone '1997-01-02';
 +   WHERE d1 > timestamp with time zone '1997-01-02' ORDER BY d1;
   48 |               d1               
  ----+--------------------------------
 -    | infinity
 +    | Thu Jan 02 03:04:05 1997 PST
 +    | Mon Feb 10 09:32:01 1997 PST
 +    | Mon Feb 10 09:32:01 1997 PST
 +    | Mon Feb 10 09:32:01 1997 PST
 +    | Mon Feb 10 14:32:01 1997 PST
 +    | Mon Feb 10 17:32:00 1997 PST
      | Mon Feb 10 17:32:01 1997 PST
      | Mon Feb 10 17:32:01 1997 PST
 -    | Mon Feb 10 17:32:02 1997 PST
 -    | Mon Feb 10 17:32:01.4 1997 PST
 -    | Mon Feb 10 17:32:01.5 1997 PST
 -    | Mon Feb 10 17:32:01.6 1997 PST
 -    | Thu Jan 02 03:04:05 1997 PST
      | Mon Feb 10 17:32:01 1997 PST
      | Mon Feb 10 17:32:01 1997 PST
      | Mon Feb 10 17:32:01 1997 PST
@@@ -1078,12 -1098,23 +1102,19 @@@ SELECT '' AS to_char_3, to_char(d1, 'Y,
             | 1,997 1997 997 97 7 20 1 02 07 045 14 6 2450494
             | 1,997 1997 997 97 7 20 1 02 07 046 15 7 2450495
             | 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
+            | 0,097 0097 097 97 7 -01 1 02 07 047 16 3 1686042
+            | 0,097 0097 097 97 7 01 1 02 07 047 16 7 1756536
+            | 0,597 0597 597 97 7 06 1 02 07 047 16 5 1939157
+            | 1,097 1097 097 97 7 11 1 02 07 047 16 3 2121778
+            | 1,697 1697 697 97 7 17 1 02 07 047 16 7 2340924
+            | 1,797 1797 797 97 7 18 1 02 07 047 16 5 2377448
+            | 1,897 1897 897 97 7 19 1 02 07 047 16 3 2413972
             | 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
 -           | 2,097 2097 097 97 7 21 1 02 07 047 16 7 2487021
 -           | 1,996 1996 996 96 6 20 1 02 09 059 28 4 2450142
 -           | 1,996 1996 996 96 6 20 1 02 09 060 29 5 2450143
 -           | 1,996 1996 996 96 6 20 1 03 09 061 01 6 2450144
 -           | 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448
 -           | 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449
 -           | 1,997 1997 997 97 7 20 1 01 01 001 01 4 2450450
             | 1,997 1997 997 97 7 20 1 02 09 059 28 6 2450508
             | 1,997 1997 997 97 7 20 1 03 09 060 01 7 2450509
 +           | 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
 +           | 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
 +           | 1,997 1997 997 97 7 20 3 07 28 191 10 5 2450640
             | 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813
             | 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814
             | 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544
@@@ -1668,18 -1695,61 +1699,69 @@@ SELECT '' AS to_char_11, to_char(d1, 'F
              | 1998 998 98 8 1 3 3
              | 1999 999 99 9 52 362 5
              | 1999 999 99 9 52 363 6
 +            | 2000 0 0 0 11 73 3
 +            | 2000 0 0 0 11 73 3
 +            | 2000 0 0 0 11 73 3
 +            | 2000 0 0 0 11 73 3
 +            | 2000 0 0 0 11 73 3
              | 2000 0 0 0 52 364 7
              | 2001 1 1 1 1 1 1
 +            | 2001 1 1 1 38 265 6
 +            | 2097 97 97 7 7 48 6
 +            | 
  (66 rows)
  
+ -- Check OF with various zone offsets, particularly fractional hours
+ SET timezone = '00:00';
+ SELECT to_char(now(), 'OF');
+  to_char 
+ ---------
+  +00
+ (1 row)
+ SET timezone = '+02:00';
+ SELECT to_char(now(), 'OF');
+  to_char 
+ ---------
+  -02
+ (1 row)
+ SET timezone = '-13:00';
+ SELECT to_char(now(), 'OF');
+  to_char 
+ ---------
+  +13
+ (1 row)
+ SET timezone = '-00:30';
+ SELECT to_char(now(), 'OF');
+  to_char 
+ ---------
+  +00:30
+ (1 row)
+ SET timezone = '00:30';
+ SELECT to_char(now(), 'OF');
+  to_char 
+ ---------
+  -00:30
+ (1 row)
+ SET timezone = '-04:30';
+ SELECT to_char(now(), 'OF');
+  to_char 
+ ---------
+  +04:30
+ (1 row)
+ SET timezone = '04:30';
+ SELECT to_char(now(), 'OF');
+  to_char 
+ ---------
+  -04:30
+ (1 row)
+ RESET timezone;
  CREATE TABLE TIMESTAMPTZ_TST (a int , b timestamptz);
  -- Test year field value with len > 4
  INSERT INTO TIMESTAMPTZ_TST VALUES(1, 'Sat Mar 12 23:58:48 1000 IST');
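The OF hunk above depends on the POSIX sign convention for numeric zone specifications (a positive offset means west of Greenwich), which is why SET timezone = '+02:00' prints -02. A minimal illustrative sketch, not taken from the merge, showing the same pattern against a fixed timestamp so the output does not depend on now() (any timestamptz literal would do):

SET timezone = '-04:30';
SELECT to_char(timestamptz '2001-02-16 20:38:40+00', 'OF');  -- +04:30
RESET timezone;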
Simple merge
Simple merge
index bb126edfdbc7428d379421bfba6a80a9884d9d28,f60991eed0c073706300aa2bf2feb886b4818057..74f5c8a909574c765b639eb58a754e87b96baaa2
@@@ -932,23 -943,28 +932,23 @@@ SELECT * FROM base_tbl
   -1 | Row -1
    0 | Row 0
    1 | Row 1
 -  2 | Updated row 2
 +  2 | Row 2
  (5 rows)
  
 -EXPLAIN (costs off)
 +EXPLAIN (costs off, nodes off)
  UPDATE rw_view1 v SET bb='Updated row 2' WHERE rw_view1_aa(v)=2
    RETURNING rw_view1_aa(v), v.bb;
 -                    QUERY PLAN                    
 ---------------------------------------------------
 - Update on base_tbl
 -   ->  Index Scan using base_tbl_pkey on base_tbl
 -         Index Cond: (a = 2)
 -(3 rows)
 -
 +ERROR:  function rw_view1_aa(rw_view1) does not exist
 +LINE 2: UPDATE rw_view1 v SET bb='Updated row 2' WHERE rw_view1_aa(v...
 +                                                       ^
 +HINT:  No function matches the given name and argument types. You might need to add explicit type casts.
  DROP TABLE base_tbl CASCADE;
 -NOTICE:  drop cascades to 2 other objects
 -DETAIL:  drop cascades to view rw_view1
 -drop cascades to function rw_view1_aa(rw_view1)
 +NOTICE:  drop cascades to view rw_view1
  -- permissions checks
- CREATE USER view_user1;
- CREATE USER view_user2;
- SET SESSION AUTHORIZATION view_user1;
- CREATE TABLE base_tbl(a int, b text, c float) DISTRIBUTE BY REPLICATION;
+ CREATE USER regress_view_user1;
+ CREATE USER regress_view_user2;
+ SET SESSION AUTHORIZATION regress_view_user1;
+ CREATE TABLE base_tbl(a int, b text, c float);
  INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
  CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl;
  INSERT INTO rw_view1 VALUES ('Row 2', 2.0, 2);
@@@ -1049,10 -1065,10 +1049,10 @@@ DROP TABLE base_tbl CASCADE
  NOTICE:  drop cascades to 2 other objects
  DETAIL:  drop cascades to view rw_view1
  drop cascades to view rw_view2
- DROP USER view_user1;
- DROP USER view_user2;
+ DROP USER regress_view_user1;
+ DROP USER regress_view_user2;
  -- column defaults
 -CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified', c serial);
 +CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified', c serial) DISTRIBUTE BY REPLICATION;
  INSERT INTO base_tbl VALUES (1, 'Row 1');
  INSERT INTO base_tbl VALUES (2, 'Row 2');
  INSERT INTO base_tbl VALUES (3);
@@@ -1646,9 -1656,9 +1646,9 @@@ CREATE VIEW rw_view1 A
    SELECT * FROM base_tbl b
    WHERE EXISTS(SELECT 1 FROM ref_tbl r WHERE r.a = b.a)
    WITH CHECK OPTION;
 -INSERT INTO rw_view1 VALUES (5); -- ok
 +--INSERT INTO rw_view1 VALUES (5); -- ok
  INSERT INTO rw_view1 VALUES (15); -- should fail
- ERROR:  new row violates WITH CHECK OPTION for "rw_view1"
+ ERROR:  new row violates check option for view "rw_view1"
  DETAIL:  Failing row contains (15).
  UPDATE rw_view1 SET a = a + 5; -- ok
  UPDATE rw_view1 SET a = a + 5; -- should fail
index 813e12310d94f02d3ace769d1afd1474b5ceac66,9b604be4b62e1b472bdc60a31b5c3b19b67f227c..0d5e67ee29100cffb5a2151f94dc969d61713df1
@@@ -69,11 -69,16 +69,12 @@@ CREATE INDEX ON vaccluster(wrap_do_anal
  INSERT INTO vaccluster VALUES (1), (2);
  ANALYZE vaccluster;
  ERROR:  ANALYZE cannot be executed from VACUUM or ANALYZE
 -CONTEXT:  SQL function "do_analyze" statement 1
 -SQL function "wrap_do_analyze" statement 1
  VACUUM FULL pg_am;
  VACUUM FULL pg_class;
 -VACUUM FULL pg_database;
 +VACUUM FULL pg_catalog.pg_database;
  VACUUM FULL vaccluster;
  ERROR:  ANALYZE cannot be executed from VACUUM or ANALYZE
 -CONTEXT:  SQL function "do_analyze" statement 1
 -SQL function "wrap_do_analyze" statement 1
  VACUUM FULL vactst;
+ VACUUM (DISABLE_PAGE_SKIPPING) vaccluster;
  DROP TABLE vaccluster;
  DROP TABLE vactst;
Simple merge
Simple merge
Simple merge
index 9140ff5dcd822b71106566b2f37f89c56cda1728,96d75bccfbb091b45ec0e119b3b9a2f868d479ab..994e8aaa7d7eecb36a1941f3d599353a54817ebe
@@@ -10,10 -10,10 +10,10 @@@ CREATE TABLE lotest_stash_values (loid 
  -- lo_creat(mode integer) returns oid
  -- The mode arg to lo_creat is unused, some vestigial holdover from ancient times
  -- returns the large object id
 -INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42);
 +INSERT INTO lotest_stash_values (loid) VALUES( lo_creat(42) );
  
  -- Test ALTER LARGE OBJECT
- CREATE ROLE regresslo;
+ CREATE ROLE regress_lo_user;
  DO $$
    BEGIN
      EXECUTE 'ALTER LARGE OBJECT ' || (select loid from lotest_stash_values)
Simple merge
index d79f71e3dd7fb6b55d4ee08a42b265047c9c8d03,384f689ac1030d1895591240c2b4ab945abcaca3..caf598689073131d78904a7e9a013464cab7222d
@@@ -43,22 -43,22 +43,22 @@@ SELECT relname, spcname FROM pg_catalog
  (1 row)
  
  PREPARE selectsource(int) AS SELECT $1;
- CREATE TABLE testschema.asexecute TABLESPACE testspace
+ CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace
      AS EXECUTE selectsource(2);
 +ERROR:  CREATE TABLE AS EXECUTE not yet supported
  SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
      where c.reltablespace = t.oid AND c.relname = 'asexecute';
 -  relname  |     spcname      
 ------------+------------------
 - asexecute | regress_tblspace
 -(1 row)
 + relname | spcname 
 +---------+---------
 +(0 rows)
  
  -- index
- CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE testspace;
+ CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace;
  SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
      where c.reltablespace = t.oid AND c.relname = 'foo_idx';
-  relname |  spcname  
- ---------+-----------
-  foo_idx | testspace
+  relname |     spcname      
+ ---------+------------------
+  foo_idx | regress_tblspace
  (1 row)
  
  -- let's try moving a table from one place to another
@@@ -77,33 -77,34 +77,33 @@@ SELECT COUNT(*) FROM testschema.atable
  (1 row)
  
  -- Will fail with bad path
- CREATE TABLESPACE badspace LOCATION '/no/such/location';
+ CREATE TABLESPACE regress_badspace LOCATION '/no/such/location';
  ERROR:  directory "/no/such/location" does not exist
  -- No such tablespace
- CREATE TABLE bar (i int) TABLESPACE nosuchspace;
- ERROR:  tablespace "nosuchspace" does not exist
+ CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace;
+ ERROR:  tablespace "regress_nosuchspace" does not exist
  -- Fail, not empty
- DROP TABLESPACE testspace;
- ERROR:  tablespace "testspace" is not empty
- CREATE ROLE tablespace_testuser1 login;
- CREATE ROLE tablespace_testuser2 login;
- ALTER TABLESPACE testspace OWNER TO tablespace_testuser1;
- SET SESSION ROLE tablespace_testuser2;
- CREATE TABLE tablespace_table (i int) TABLESPACE testspace; -- fail
- ERROR:  permission denied for tablespace testspace
+ DROP TABLESPACE regress_tblspace;
+ ERROR:  tablespace "regress_tblspace" is not empty
+ CREATE ROLE regress_tablespace_user1 login;
+ CREATE ROLE regress_tablespace_user2 login;
+ ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1;
+ SET SESSION ROLE regress_tablespace_user2;
+ CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail
+ ERROR:  permission denied for tablespace regress_tblspace
  RESET ROLE;
- ALTER TABLESPACE testspace RENAME TO testspace_renamed;
- ALTER TABLE ALL IN TABLESPACE testspace_renamed SET TABLESPACE pg_default;
- ALTER INDEX ALL IN TABLESPACE testspace_renamed SET TABLESPACE pg_default;
+ ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed;
+ ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+ ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
  -- Should show notice that nothing was done
- ALTER TABLE ALL IN TABLESPACE testspace_renamed SET TABLESPACE pg_default;
- NOTICE:  no matching relations in tablespace "testspace_renamed" found
+ ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+ NOTICE:  no matching relations in tablespace "regress_tblspace_renamed" found
  -- Should succeed
- DROP TABLESPACE testspace_renamed;
+ DROP TABLESPACE regress_tblspace_renamed;
  DROP SCHEMA testschema CASCADE;
 -NOTICE:  drop cascades to 4 other objects
 +NOTICE:  drop cascades to 3 other objects
  DETAIL:  drop cascades to table testschema.foo
  drop cascades to table testschema.asselect
 -drop cascades to table testschema.asexecute
  drop cascades to table testschema.atable
- DROP ROLE tablespace_testuser1;
- DROP ROLE tablespace_testuser2;
+ DROP ROLE regress_tablespace_user1;
+ DROP ROLE regress_tablespace_user2;
index dcda4dc7fb8f170b777d20e73bebc2694ac1f4ff,3815182fe7a8b14b3d89bd0d052ddc342aeb9939..ed93ed9104bd66ee1626c4bccb3fc81e63583319
@@@ -100,14 -97,7 +100,14 @@@ test: rules psql_crosstab select_parall
  # ----------
  # Another group of parallel tests
  # ----------
- test: select_views portals_p2 foreign_key cluster dependency guc bitmapops combocid tsearch tsdicts foreign_data window xmlmap functional_deps json jsonb indirect_toast equivclass
 -test: select_views portals_p2 foreign_key cluster dependency guc bitmapops combocid tsearch tsdicts foreign_data window xmlmap functional_deps advisory_lock json jsonb json_encoding indirect_toast equivclass
++test: select_views portals_p2 foreign_key cluster dependency guc bitmapops combocid tsearch tsdicts foreign_data window xmlmap functional_deps json jsonb json_encoding indirect_toast equivclass
 +
 +# ----------
 +# As XL uses advisory locks internally, run this test separately.
 +# ----------
 +test: advisory_lock
 +# Run separately, as a similar table foo is created in other tests below.
 +test: rangefuncs
  # ----------
  # Another group of parallel tests
  # NB: temp.sql does a reconnect which transiently uses 2 connections,
index ec1871770ce5af035e4169c09415386ddf3139bd,574f5b87bee27de3e31335205e38595bdfe63b80..dc2b47539e31fbb9aa0ed443abb3c79482338507
@@@ -3013,9 -2184,9 +3013,11 @@@ regression_main(int argc, char *argv[]
  
        if (temp_instance)
        {
 +#ifndef PGXC
                FILE       *pg_conf;
 +#endif
+               const char *env_wait;
+               int                     wait_seconds;
  
                /*
                 * Prepare the temp instance
                                        progname, strerror(errno));
                        exit(2);
                }
 +#endif
  
                /*
-                * Wait till postmaster is able to accept connections (normally only a
-                * second or so, but Cygwin is reportedly *much* slower).  Don't wait
-                * forever, however.
+                * Wait till postmaster is able to accept connections; normally this
+                * is only a second or so, but Cygwin is reportedly *much* slower, and
+                * test builds using Valgrind or similar tools might be too.  Hence,
+                * allow the default timeout of 60 seconds to be overridden from the
+                * PGCTLTIMEOUT environment variable.
                 */
-               for (i = 0; i < 60; i++)
+               env_wait = getenv("PGCTLTIMEOUT");
+               if (env_wait != NULL)
+               {
+                       wait_seconds = atoi(env_wait);
+                       if (wait_seconds <= 0)
+                               wait_seconds = 60;
+               }
+               else
+                       wait_seconds = 60;
+               for (i = 0; i < wait_seconds; i++)
                {
 +
 +#ifdef PGXC
 +                      /* Done if psql succeeds for each node */
 +                      if (check_node_running(PGXC_COORD_1) &&
 +                              check_node_running(PGXC_COORD_2) &&
 +                              check_node_running(PGXC_DATANODE_1) &&
 +                              check_node_running(PGXC_DATANODE_2))
 +                              break;
 +
 +                      /* Check node failure */
 +                      check_node_fail(PGXC_COORD_1);
 +                      check_node_fail(PGXC_COORD_2);
 +                      check_node_fail(PGXC_DATANODE_1);
 +                      check_node_fail(PGXC_DATANODE_2);
 +#else
                        /* Done if psql succeeds */
                        if (system(buf2) == 0)
                                break;
  
                        pg_usleep(1000000L);
                }
-               if (i >= 60)
+               if (i >= wait_seconds)
                {
-                       fprintf(stderr, _("\n%s: postmaster did not respond within 60 seconds\nExamine %s/log/postmaster.log for the reason\n"), progname, outputdir);
 +#ifdef PGXC
 +                      /* If one node fails, all fail */
 +                      kill_node(PGXC_COORD_1);
 +                      kill_node(PGXC_COORD_2);
 +                      kill_node(PGXC_DATANODE_1);
 +                      kill_node(PGXC_DATANODE_2);
 +#else
+                       fprintf(stderr, _("\n%s: postmaster did not respond within %d seconds\nExamine %s/log/postmaster.log for the reason\n"),
+                                       progname, wait_seconds, outputdir);
  
                        /*
                         * If we get here, the postmaster is probably wedged somewhere in
index f627615a3ce951810559607e9da952a775cd2b78,8958d8cdb9d29a46f6bc21a6b0069390794c41e1..de2d0a5a592d94030c9dd6f1c5e2ebbae97bb6a9
@@@ -87,8 -89,8 +89,7 @@@ test: unio
  test: case
  test: join
  test: aggregates
- test: groupingsets
  test: transactions
 -ignore: random
  test: random
  test: portals
  test: arrays
index 4aa348af9c8f50f836b23ab63e7032cd39ec28f7,9983ff3a8962f749f265d60ea562963ec56b48cc..2fcace3646d04c3cfe5946a23dccbb7df8fbb65b
@@@ -226,27 -222,35 +226,35 @@@ FROM bool_test
  --
  
  -- Basic cases
 -explain (costs off)
 +explain (costs off, nodes off)
    select min(unique1) from tenk1;
  select min(unique1) from tenk1;
 -explain (costs off)
 +explain (costs off, nodes off)
    select max(unique1) from tenk1;
  select max(unique1) from tenk1;
 -explain (costs off)
 +explain (costs off, nodes off)
    select max(unique1) from tenk1 where unique1 < 42;
  select max(unique1) from tenk1 where unique1 < 42;
 -explain (costs off)
 +explain (costs off, nodes off)
    select max(unique1) from tenk1 where unique1 > 42;
  select max(unique1) from tenk1 where unique1 > 42;
- explain (costs off, nodes off)
+ -- the planner may choose a generic aggregate here if parallel query is
+ -- enabled, since that plan will be parallel safe and the "optimized"
+ -- plan, which has almost identical cost, will not be.  we want to test
+ -- the optimized plan, so temporarily disable parallel query.
+ begin;
+ set local max_parallel_workers_per_gather = 0;
+ explain (costs off)
    select max(unique1) from tenk1 where unique1 > 42000;
  select max(unique1) from tenk1 where unique1 > 42000;
+ rollback;
  
  -- multi-column index (uses tenk1_thous_tenthous)
 -explain (costs off)
 +explain (costs off, nodes off)
    select max(tenthous) from tenk1 where thousand = 33;
  select max(tenthous) from tenk1 where thousand = 33;
 -explain (costs off)
 +explain (costs off, nodes off)
    select min(tenthous) from tenk1 where thousand = 33;
  select min(tenthous) from tenk1 where thousand = 33;
  
@@@ -596,17 -635,167 +640,181 @@@ drop view aggordview1
  select least_agg(q1,q2) from int8_tbl;
  select least_agg(variadic array[q1,q2]) from int8_tbl;
  
 +-- int8 aggregates for distributed tables
 +
 +CREATE TABLE int8_tbl_aggtest AS SELECT * FROM int8_tbl;
 +
 +SELECT avg(q1) FROM int8_tbl_aggtest;
 +SELECT sum(q1) FROM int8_tbl_aggtest;
 +SELECT max(q1) FROM int8_tbl_aggtest;
 +SELECT min(q1) FROM int8_tbl_aggtest;
 +SELECT stddev_pop(q1) FROM int8_tbl_aggtest;
 +SELECT stddev_samp(q1) FROM int8_tbl_aggtest;
 +SELECT var_pop(q1) FROM int8_tbl_aggtest;
 +SELECT var_samp(q1) FROM int8_tbl_aggtest;
 +
 +DROP TABLE int8_tbl_aggtest;
+ -- test aggregates with common transition functions share the same states
+ begin work;
+ create type avg_state as (total bigint, count bigint);
+ create or replace function avg_transfn(state avg_state, n int) returns avg_state as
+ $$
+ declare new_state avg_state;
+ begin
+       raise notice 'avg_transfn called with %', n;
+       if state is null then
+               if n is not null then
+                       new_state.total := n;
+                       new_state.count := 1;
+                       return new_state;
+               end if;
+               return null;
+       elsif n is not null then
+               state.total := state.total + n;
+               state.count := state.count + 1;
+               return state;
+       end if;
+       return null;
+ end
+ $$ language plpgsql;
+ create function avg_finalfn(state avg_state) returns int4 as
+ $$
+ begin
+       if state is null then
+               return NULL;
+       else
+               return state.total / state.count;
+       end if;
+ end
+ $$ language plpgsql;
+ create function sum_finalfn(state avg_state) returns int4 as
+ $$
+ begin
+       if state is null then
+               return NULL;
+       else
+               return state.total;
+       end if;
+ end
+ $$ language plpgsql;
+ create aggregate my_avg(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = avg_finalfn
+ );
+ create aggregate my_sum(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = sum_finalfn
+ );
+ -- aggregate state should be shared as aggs are the same.
+ select my_avg(one),my_avg(one) from (values(1),(3)) t(one);
+ -- aggregate state should be shared as transfn is the same for both aggs.
+ select my_avg(one),my_sum(one) from (values(1),(3)) t(one);
+ -- shouldn't share states due to the distinctness not matching.
+ select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one);
+ -- shouldn't share states due to the filter clause not matching.
+ select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one);
+ -- this should not share the state due to different input columns.
+ select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two);
+ -- test that aggs with the same sfunc and initcond share the same agg state
+ create aggregate my_sum_init(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = sum_finalfn,
+    initcond = '(10,0)'
+ );
+ create aggregate my_avg_init(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = avg_finalfn,
+    initcond = '(10,0)'
+ );
+ create aggregate my_avg_init2(int4)
+ (
+    stype = avg_state,
+    sfunc = avg_transfn,
+    finalfunc = avg_finalfn,
+    initcond = '(4,0)'
+ );
+ -- state should be shared if INITCONDs are matching
+ select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one);
+ -- Varying INITCONDs should cause the states not to be shared.
+ select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one);
+ rollback;
+ -- test aggregate state sharing to ensure it works if one aggregate has a
+ -- finalfn and the other one has none.
+ begin work;
+ create or replace function sum_transfn(state int4, n int4) returns int4 as
+ $$
+ declare new_state int4;
+ begin
+       raise notice 'sum_transfn called with %', n;
+       if state is null then
+               if n is not null then
+                       new_state := n;
+                       return new_state;
+               end if;
+               return null;
+       elsif n is not null then
+               state := state + n;
+               return state;
+       end if;
+       return null;
+ end
+ $$ language plpgsql;
+ create function halfsum_finalfn(state int4) returns int4 as
+ $$
+ begin
+       if state is null then
+               return NULL;
+       else
+               return state / 2;
+       end if;
+ end
+ $$ language plpgsql;
+ create aggregate my_sum(int4)
+ (
+    stype = int4,
+    sfunc = sum_transfn
+ );
+ create aggregate my_half_sum(int4)
+ (
+    stype = int4,
+    sfunc = sum_transfn,
+    finalfunc = halfsum_finalfn
+ );
+ -- Agg state should be shared even though my_sum has no finalfn
+ select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one);
+ rollback;
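The transition-state sharing exercised above is observable through the RAISE NOTICE in avg_transfn: when two aggregates can share a state (same transition function, state type, initcond, and inputs, with matching DISTINCT and FILTER), the transition function runs once per input row for both, so the two-row queries emit two notices instead of four. A small sketch reusing the definitions from the hunk above (illustrative only):

-- shared state: expect 2 "avg_transfn called with ..." notices
select my_avg(one), my_sum(one) from (values (1), (3)) t(one);
-- not shared (DISTINCT differs): expect 4 notices
select my_avg(distinct one), my_sum(one) from (values (1), (3)) t(one);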
index 11e99ceca9b94ffae9502665c10642b27d8b77ca,72e65d4ee052e6708f8e127b75db3e726d973cdd..d162d2f33cd49e3293906c69e107ac41efd4e4a4
@@@ -332,14 -335,18 +335,18 @@@ create table nv_child_2010 () inherits 
  create table nv_child_2011 () inherits (nv_parent);
  alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid;
  alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid;
 -explain (costs off) select * from nv_parent where d between '2011-08-01' and '2011-08-31';
 +explain (costs off, nodes off) select * from nv_parent where d between '2011-08-01' and '2011-08-31';
  create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent);
 -explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date;
 -explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
 +explain (costs off, nodes off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date;
 +explain (costs off, nodes off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
  -- after validation, the constraint should be used
  alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check;
 -explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
 +explain (costs off, nodes off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
  
+ -- add an inherited NOT VALID constraint
+ alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid;
+ \d nv_child_2009
+ -- we leave nv_parent and children around to help test pg_dump logic
  
  -- Foreign key adding test with mixed types
  
@@@ -899,8 -906,19 +906,19 @@@ select * from child
  drop table child;
  drop table parent;
  
+ -- check error cases for inheritance column merging
+ create table parent (a float8, b numeric(10,4), c text collate "C");
+ create table child (a float4) inherits (parent); -- fail
+ create table child (b decimal(10,7)) inherits (parent); -- fail
+ create table child (c text collate "POSIX") inherits (parent); -- fail
+ create table child (a double precision, b decimal(10,4)) inherits (parent);
+ drop table child;
+ drop table parent;
  -- test copy in/out
 -create table test (a int4, b int4, c int4);
 +create table test (a int4, b int4, c int4) distribute by roundrobin;
  insert into test values (1,2,3);
  alter table test drop a;
  copy test to stdout;
index 3ea341476a350800de3f89d7337cde7f8acbad9d,a2c3db112742f093e37989961f9e40d6829707fe..0b52fe9ad8e53384f83bb33ec2c2e5793851cdef
@@@ -89,16 -84,56 +89,57 @@@ SELECT a[1:3]
            b[1:1][1:2][1:2],
            c[1:2],
            d[1:1][2:2]
 -   FROM arrtest;
 +   FROM arrtest 
 +   ORDER BY a, b, c;
  
+ SELECT b[1:1][2][2],
+        d[1:1][2]
+    FROM arrtest;
  INSERT INTO arrtest(a) VALUES('{1,null,3}');
 -SELECT a FROM arrtest;
 +SELECT a FROM arrtest ORDER BY 1;
  UPDATE arrtest SET a[4] = NULL WHERE a[2] IS NULL;
 -SELECT a FROM arrtest WHERE a[2] IS NULL;
 +SELECT a FROM arrtest WHERE a[2] IS NULL ORDER BY 1;
  DELETE FROM arrtest WHERE a[2] IS NULL AND b IS NULL;
 -SELECT a,b,c FROM arrtest;
 +SELECT a,b,c FROM arrtest ORDER BY a, b, c;
  
+ -- test mixed slice/scalar subscripting
+ select '{{1,2,3},{4,5,6},{7,8,9}}'::int[];
+ select ('{{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
+ select '[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[];
+ select ('[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
+ -- test slices with empty lower and/or upper index
+ CREATE TEMP TABLE arrtest_s (
+   a       int2[],
+   b       int2[][]
+ );
+ INSERT INTO arrtest_s VALUES ('{1,2,3,4,5}', '{{1,2,3}, {4,5,6}, {7,8,9}}');
+ INSERT INTO arrtest_s VALUES ('[0:4]={1,2,3,4,5}', '[0:2][0:2]={{1,2,3}, {4,5,6}, {7,8,9}}');
+ SELECT * FROM arrtest_s;
+ SELECT a[:3], b[:2][:2] FROM arrtest_s;
+ SELECT a[2:], b[2:][2:] FROM arrtest_s;
+ SELECT a[:], b[:] FROM arrtest_s;
+ -- updates
+ UPDATE arrtest_s SET a[:3] = '{11, 12, 13}', b[:2][:2] = '{{11,12}, {14,15}}'
+   WHERE array_lower(a,1) = 1;
+ SELECT * FROM arrtest_s;
+ UPDATE arrtest_s SET a[3:] = '{23, 24, 25}', b[2:][2:] = '{{25,26}, {28,29}}';
+ SELECT * FROM arrtest_s;
+ UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}';
+ SELECT * FROM arrtest_s;
+ UPDATE arrtest_s SET a[:] = '{23, 24, 25}';  -- fail, too small
+ INSERT INTO arrtest_s VALUES(NULL, NULL);
+ UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}';  -- fail, no good with null
+ -- check with fixed-length-array type, such as point
+ SELECT f1[0:1] FROM POINT_TBL;
+ SELECT f1[0:] FROM POINT_TBL;
+ SELECT f1[:1] FROM POINT_TBL;
+ SELECT f1[:] FROM POINT_TBL;
  --
  -- test array extension
  --
index 464a34e631b1e58705d9d152cc4bee708d01bacc,128a016963520e854812922750ca850356f15a12..a1838c624b7f9fc2263a56aebeb7d238e758a145
@@@ -114,8 -113,69 +114,70 @@@ SELECT '' AS four, @@(b1.f1) AS 
  
  -- wholly-contained
  SELECT '' AS one, b1.*, b2.*
 -   FROM BOX_TBL b1, BOX_TBL b2
 -   WHERE b1.f1 @> b2.f1 and not b1.f1 ~= b2.f1;
 +   FROM BOX_TBL b1, BOX_TBL b2 
 +   WHERE b1.f1 @> b2.f1 and not b1.f1 ~= b2.f1 
 +   ORDER BY (b1.f1[0])[0], (b1.f1[0])[1], (b1.f1[2])[0], (b1.f1[2])[1], (b2.f1[0])[0], (b2.f1[0])[1], (b2.f1[2])[0], (b2.f1[2])[1]; 
  
 -SELECT '' AS four, height(f1), width(f1) FROM BOX_TBL;
 +SELECT '' AS four, height(f1), width(f1) FROM BOX_TBL ORDER BY (f1[0])[0], (f1[0])[1], (f1[2])[0], (f1[2])[1];
+ --
+ -- Test the SP-GiST index
+ --
+ CREATE TEMPORARY TABLE box_temp (f1 box);
+ INSERT INTO box_temp
+       SELECT box(point(i, i), point(i * 2, i * 2))
+       FROM generate_series(1, 50) AS i;
+ CREATE INDEX box_spgist ON box_temp USING spgist (f1);
+ INSERT INTO box_temp
+       VALUES (NULL),
+                  ('(0,0)(0,100)'),
+                  ('(-3,4.3333333333)(40,1)'),
+                  ('(0,100)(0,infinity)'),
+                  ('(-infinity,0)(0,infinity)'),
+                  ('(-infinity,-infinity)(infinity,infinity)');
+ SET enable_seqscan = false;
+ SELECT * FROM box_temp WHERE f1 << '(10,20),(30,40)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 << '(10,20),(30,40)';
+ SELECT * FROM box_temp WHERE f1 &< '(10,4.333334),(5,100)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 &< '(10,4.333334),(5,100)';
+ SELECT * FROM box_temp WHERE f1 && '(15,20),(25,30)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 && '(15,20),(25,30)';
+ SELECT * FROM box_temp WHERE f1 &> '(40,30),(45,50)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 &> '(40,30),(45,50)';
+ SELECT * FROM box_temp WHERE f1 >> '(30,40),(40,30)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 >> '(30,40),(40,30)';
+ SELECT * FROM box_temp WHERE f1 <<| '(10,4.33334),(5,100)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 <<| '(10,4.33334),(5,100)';
+ SELECT * FROM box_temp WHERE f1 &<| '(10,4.3333334),(5,1)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 &<| '(10,4.3333334),(5,1)';
+ SELECT * FROM box_temp WHERE f1 |&> '(49.99,49.99),(49.99,49.99)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 |&> '(49.99,49.99),(49.99,49.99)';
+ SELECT * FROM box_temp WHERE f1 |>> '(37,38),(39,40)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 |>> '(37,38),(39,40)';
+ SELECT * FROM box_temp WHERE f1 @> '(10,11),(15,16)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 @> '(10,11),(15,15)';
+ SELECT * FROM box_temp WHERE f1 <@ '(10,15),(30,35)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 <@ '(10,15),(30,35)';
+ SELECT * FROM box_temp WHERE f1 ~= '(20,20),(40,40)';
+ EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 ~= '(20,20),(40,40)';
+ RESET enable_seqscan;
+ DROP INDEX box_spgist;
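The SP-GiST hunk above leans on the less-common box operators; as a quick reference to their standard semantics, an illustrative sketch rather than part of the test:

SELECT box '(1,1),(2,2)' << box '(3,0),(5,1)';    -- strictly left of  => t
SELECT box '(1,1),(3,3)' && box '(2,2),(4,4)';    -- overlaps          => t
SELECT box '(1,1),(2,2)' <@ box '(0,0),(5,5)';    -- contained in      => t
SELECT box '(1,2),(2,3)' |>> box '(0,0),(3,1)';   -- strictly above    => t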
index c311e8151bce2d2674dfb48108328248ba1d5613,b2377e46109a6ccbf22659a976bae64b7c4f2baf..ed76878217fa8b573588c274afe7cce83e07a2da
@@@ -162,8 -154,46 +162,46 @@@ UPDATE CASE_TB
    FROM CASE2_TBL b
    WHERE j = -CASE_TBL.i;
  
 -SELECT * FROM CASE_TBL;
 +SELECT * FROM CASE_TBL ORDER BY i, f;
  
+ --
+ -- Nested CASE expressions
+ --
+ -- This test exercises a bug caused by aliasing econtext->caseValue_isNull
+ -- with the isNull argument of the inner CASE's ExecEvalCase() call.  After
+ -- evaluating the vol(null) expression in the inner CASE's second WHEN-clause,
+ -- the isNull flag for the case test value incorrectly became true, causing
+ -- the third WHEN-clause not to match.  The volatile function calls are needed
+ -- to prevent constant-folding in the planner, which would hide the bug.
+ CREATE FUNCTION vol(text) returns text as
+   'begin return $1; end' language plpgsql volatile;
+ SELECT CASE
+   (CASE vol('bar')
+     WHEN 'foo' THEN 'it was foo!'
+     WHEN vol(null) THEN 'null input'
+     WHEN 'bar' THEN 'it was bar!' END
+   )
+   WHEN 'it was foo!' THEN 'foo recognized'
+   WHEN 'it was bar!' THEN 'bar recognized'
+   ELSE 'unrecognized' END;
+ -- In this case, we can't inline the SQL function without confusing things.
+ CREATE DOMAIN foodomain AS text;
+ CREATE FUNCTION volfoo(text) returns foodomain as
+   'begin return $1::foodomain; end' language plpgsql volatile;
+ CREATE FUNCTION inline_eq(foodomain, foodomain) returns boolean as
+   'SELECT CASE $2::text WHEN $1::text THEN true ELSE false END' language sql;
+ CREATE OPERATOR = (procedure = inline_eq,
+                    leftarg = foodomain, rightarg = foodomain);
+ SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' END;
  --
  -- Clean up
  --
index 99ae7cdddee069f83483c4714f376c5a86567061,8dd9459bda00c131a8477f2ecb5f0b204c144b10..3532c360c10cb7e6f10830c2632e62405731b577
@@@ -198,11 -191,43 +198,43 @@@ SET SESSION AUTHORIZATION clstr_user
  create temp table clstr_temp (col1 int primary key, col2 text);
  insert into clstr_temp values (2, 'two'), (1, 'one');
  cluster clstr_temp using clstr_temp_pkey;
 -select * from clstr_temp;
 +select * from clstr_temp order by 1;
  drop table clstr_temp;
  
+ RESET SESSION AUTHORIZATION;
+ -- Test CLUSTER with external tuplesorting
+ create table clstr_4 as select * from tenk1;
+ create index cluster_sort on clstr_4 (hundred, thousand, tenthous);
+ -- ensure we don't use the index in CLUSTER nor the checking SELECTs
+ set enable_indexscan = off;
+ -- Use external sort that only ever uses quicksort to sort runs:
+ set maintenance_work_mem = '1MB';
+ set replacement_sort_tuples = 0;
+ cluster clstr_4 using cluster_sort;
+ select * from
+ (select hundred, lag(hundred) over () as lhundred,
+         thousand, lag(thousand) over () as lthousand,
+         tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss
+ where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous);
+ -- Replacement selection will now be forced.  It should only produce a single
+ -- run, due to the fact that input is found to be presorted:
+ set replacement_sort_tuples = 150000;
+ cluster clstr_4 using cluster_sort;
+ select * from
+ (select hundred, lag(hundred) over () as lhundred,
+         thousand, lag(thousand) over () as lthousand,
+         tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss
+ where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous);
+ reset enable_indexscan;
+ reset maintenance_work_mem;
+ reset replacement_sort_tuples;
  -- clean up
- \c -
  DROP TABLE clustertest;
  DROP TABLE clstr_1;
  DROP TABLE clstr_2;
Simple merge
Simple merge
Simple merge
Simple merge
index ad9d5a5495ddfa9fa0ce845b139f5b19b50a216b,215e7a478499a1cebb91c19f571dfbdbbb7a29f8..5d6c103afec16f5e3f6bb0b38d2e2da406e281d9
@@@ -166,4 -166,50 +166,50 @@@ INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.
  
  INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
  
 -SELECT '' AS five, * FROM FLOAT8_TBL;
 +SELECT '' AS five, * FROM FLOAT8_TBL ORDER BY f1;
+ -- test exact cases for trigonometric functions in degrees
+ SET extra_float_digits = 3;
+ SELECT x,
+        sind(x),
+        sind(x) IN (-1,-0.5,0,0.5,1) AS sind_exact
+ FROM (VALUES (0), (30), (90), (150), (180),
+       (210), (270), (330), (360)) AS t(x);
+ SELECT x,
+        cosd(x),
+        cosd(x) IN (-1,-0.5,0,0.5,1) AS cosd_exact
+ FROM (VALUES (0), (60), (90), (120), (180),
+       (240), (270), (300), (360)) AS t(x);
+ SELECT x,
+        tand(x),
+        tand(x) IN ('-Infinity'::float8,-1,0,
+                    1,'Infinity'::float8) AS tand_exact,
+        cotd(x),
+        cotd(x) IN ('-Infinity'::float8,-1,0,
+                    1,'Infinity'::float8) AS cotd_exact
+ FROM (VALUES (0), (45), (90), (135), (180),
+       (225), (270), (315), (360)) AS t(x);
+ SELECT x,
+        asind(x),
+        asind(x) IN (-90,-30,0,30,90) AS asind_exact,
+        acosd(x),
+        acosd(x) IN (0,60,90,120,180) AS acosd_exact
+ FROM (VALUES (-1), (-0.5), (0), (0.5), (1)) AS t(x);
+ SELECT x,
+        atand(x),
+        atand(x) IN (-90,-45,0,45,90) AS atand_exact
+ FROM (VALUES ('-Infinity'::float8), (-1), (0), (1),
+       ('Infinity'::float8)) AS t(x);
+ SELECT x, y,
+        atan2d(y, x),
+        atan2d(y, x) IN (-90,0,90,180) AS atan2d_exact
+ FROM (SELECT 10*cosd(a), 10*sind(a)
+       FROM generate_series(0, 360, 90) AS t(a)) AS t(x,y);
+ RESET extra_float_digits;
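The degree-based trigonometric functions tested above are defined to return exact results at the special angles, which is what the IN-list checks rely on; the radian versions typically miss by an ulp because pi()/6 is itself inexact. A one-line illustrative check:

SELECT sind(30) AS s, sind(30) = 0.5 AS sind_exact;   -- 0.5, t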
Simple merge
Simple merge
Simple merge
index 316bac241cd618b04c1b0a7900ef64dcfe430b4a,7924d5d46deeddb1325e865d4888f3446aa5dbd9..2fa5971757d93dd34c952b69b28043364d581963
@@@ -33,6 -33,54 +33,54 @@@ select * from inserttest order by 1,2,3
  --
  insert into inserttest values(30, 50, repeat('x', 10000));
  
 -select col1, col2, char_length(col3) from inserttest;
 +select col1, col2, char_length(col3) from inserttest order by 1,2,3;
  
  drop table inserttest;
+ --
+ -- check indirection (field/array assignment), cf bug #14265
+ --
+ -- these tests are aware that transformInsertStmt has 3 separate code paths
+ --
+ create type insert_test_type as (if1 int, if2 text[]);
+ create table inserttest (f1 int, f2 int[],
+                          f3 insert_test_type, f4 insert_test_type[]);
+ insert into inserttest (f2[1], f2[2]) values (1,2);
+ insert into inserttest (f2[1], f2[2]) values (3,4), (5,6);
+ insert into inserttest (f2[1], f2[2]) select 7,8;
+ insert into inserttest (f2[1], f2[2]) values (1,default);  -- not supported
+ insert into inserttest (f3.if1, f3.if2) values (1,array['foo']);
+ insert into inserttest (f3.if1, f3.if2) values (1,'{foo}'), (2,'{bar}');
+ insert into inserttest (f3.if1, f3.if2) select 3, '{baz,quux}';
+ insert into inserttest (f3.if1, f3.if2) values (1,default);  -- not supported
+ insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar');
+ insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar'), ('baz', 'quux');
+ insert into inserttest (f3.if2[1], f3.if2[2]) select 'bear', 'beer';
+ insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar');
+ insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar'), ('baz', 'quux');
+ insert into inserttest (f4[1].if2[1], f4[1].if2[2]) select 'bear', 'beer';
+ select * from inserttest;
+ -- also check reverse-listing
+ create table inserttest2 (f1 bigint, f2 text);
+ create rule irule1 as on insert to inserttest2 do also
+   insert into inserttest (f3.if2[1], f3.if2[2])
+   values (new.f1,new.f2);
+ create rule irule2 as on insert to inserttest2 do also
+   insert into inserttest (f4[1].if1, f4[1].if2[2])
+   values (1,'fool'),(new.f1,new.f2);
+ create rule irule3 as on insert to inserttest2 do also
+   insert into inserttest (f4[1].if1, f4[1].if2[2])
+   select new.f1, new.f2;
+ \d+ inserttest2
+ drop table inserttest2;
+ drop table inserttest;
+ drop type insert_test_type;
Simple merge
Simple merge
Simple merge
Simple merge
index 2ba84e924442ac9a3fc1b178f2dc3c7cf87000fd,5f3269def8a3558145d45fd1973c94e3bdcda161..1b1d3be97dbc36609c3fa76ae74ea8534c1ae719
@@@ -24,44 -24,45 +24,44 @@@ SELECT * FROM mvtest_tm
  
  -- create various views
  EXPLAIN (costs off)
-   CREATE MATERIALIZED VIEW tvm AS SELECT * FROM tv ORDER BY type;
- CREATE MATERIALIZED VIEW tvm AS SELECT * FROM tv ORDER BY type;
- SELECT * FROM tvm;
- CREATE MATERIALIZED VIEW tmm AS SELECT sum(totamt) AS grandtot FROM tm;
- CREATE MATERIALIZED VIEW tvmm AS SELECT sum(totamt) AS grandtot FROM tvm;
- CREATE UNIQUE INDEX tvmm_expr ON tvmm ((grandtot > 0));
- CREATE UNIQUE INDEX tvmm_pred ON tvmm (grandtot) WHERE grandtot < 0;
- CREATE VIEW tvv AS SELECT sum(totamt) AS grandtot FROM tv;
+   CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type;
+ CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type;
+ SELECT * FROM mvtest_tvm;
+ CREATE MATERIALIZED VIEW mvtest_tmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tm;
+ CREATE MATERIALIZED VIEW mvtest_tvmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tvm;
+ CREATE UNIQUE INDEX mvtest_tvmm_expr ON mvtest_tvmm ((grandtot > 0));
+ CREATE UNIQUE INDEX mvtest_tvmm_pred ON mvtest_tvmm (grandtot) WHERE grandtot < 0;
+ CREATE VIEW mvtest_tvv AS SELECT sum(totamt) AS grandtot FROM mvtest_tv;
  EXPLAIN (costs off)
-   CREATE MATERIALIZED VIEW tvvm AS SELECT * FROM tvv;
- CREATE MATERIALIZED VIEW tvvm AS SELECT * FROM tvv;
- CREATE VIEW tvvmv AS SELECT * FROM tvvm;
- CREATE MATERIALIZED VIEW bb AS SELECT * FROM tvvmv;
- CREATE INDEX aa ON bb (grandtot);
+   CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv;
+ CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv;
+ CREATE VIEW mvtest_tvvmv AS SELECT * FROM mvtest_tvvm;
+ CREATE MATERIALIZED VIEW mvtest_bb AS SELECT * FROM mvtest_tvvmv;
+ CREATE INDEX mvtest_aa ON mvtest_bb (grandtot);
  
  -- check that plans seem reasonable
- \d+ tvm
- \d+ tvm
- \d+ tvvm
- \d+ bb
+ \d+ mvtest_tvm
+ \d+ mvtest_tvm
+ \d+ mvtest_tvvm
+ \d+ mvtest_bb
  
  -- test schema behavior
- CREATE SCHEMA mvschema;
- ALTER MATERIALIZED VIEW tvm SET SCHEMA mvschema;
- \d+ tvm
- \d+ tvmm
- SET search_path = mvschema, public;
- \d+ tvm
+ CREATE SCHEMA mvtest_mvschema;
+ ALTER MATERIALIZED VIEW mvtest_tvm SET SCHEMA mvtest_mvschema;
+ \d+ mvtest_tvm
+ \d+ mvtest_tvmm
+ SET search_path = mvtest_mvschema, public;
+ \d+ mvtest_tvm
  
  -- modify the underlying table data
- INSERT INTO t VALUES (6, 'z', 13);
+ INSERT INTO mvtest_t VALUES (6, 'z', 13);
  
  -- confirm pre- and post-refresh contents of fairly simple materialized views
- SELECT * FROM tm ORDER BY type;
- SELECT * FROM tvm ORDER BY type;
- REFRESH MATERIALIZED VIEW tvm;
- SELECT * FROM tm ORDER BY type;
- SELECT * FROM tvm ORDER BY type;
+ SELECT * FROM mvtest_tm ORDER BY type;
+ SELECT * FROM mvtest_tvm ORDER BY type;
 -REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tm;
+ REFRESH MATERIALIZED VIEW mvtest_tvm;
+ SELECT * FROM mvtest_tm ORDER BY type;
+ SELECT * FROM mvtest_tvm ORDER BY type;
  RESET search_path;
  
  -- confirm pre- and post-refresh contents of nested materialized views
@@@ -119,87 -120,123 +119,119 @@@ CREATE MATERIALIZED VIEW mv_test2 AS SE
  CREATE MATERIALIZED VIEW mv_test3 AS SELECT * FROM mv_test2 WHERE moo = 12345;
  SELECT relispopulated FROM pg_class WHERE oid = 'mv_test3'::regclass;
  
- DROP VIEW v_test1 CASCADE;
+ DROP VIEW mvtest_vt1 CASCADE;
  
  -- test that vacuum does not make empty matview look unpopulated
- CREATE TABLE hoge (i int);
- INSERT INTO hoge VALUES (generate_series(1,100000));
- CREATE MATERIALIZED VIEW hogeview AS SELECT * FROM hoge WHERE i % 2 = 0;
- CREATE INDEX hogeviewidx ON hogeview (i);
- DELETE FROM hoge;
- REFRESH MATERIALIZED VIEW hogeview;
- SELECT * FROM hogeview WHERE i < 10;
- VACUUM ANALYZE hogeview;
- SELECT * FROM hogeview WHERE i < 10;
- DROP TABLE hoge CASCADE;
+ CREATE TABLE mvtest_huge (i int);
+ INSERT INTO mvtest_huge VALUES (generate_series(1,100000));
+ CREATE MATERIALIZED VIEW mvtest_hugeview AS SELECT * FROM mvtest_huge WHERE i % 2 = 0;
+ CREATE INDEX mvtest_hugeviewidx ON mvtest_hugeview (i);
+ DELETE FROM mvtest_huge;
+ REFRESH MATERIALIZED VIEW mvtest_hugeview;
+ SELECT * FROM mvtest_hugeview WHERE i < 10;
+ VACUUM ANALYZE mvtest_hugeview;
+ SELECT * FROM mvtest_hugeview WHERE i < 10;
+ DROP TABLE mvtest_huge CASCADE;
  
  -- test that duplicate values on unique index prevent refresh
- CREATE TABLE foo(a, b) AS VALUES(1, 10);
- CREATE MATERIALIZED VIEW mv AS SELECT * FROM foo;
- CREATE UNIQUE INDEX ON mv(a);
- INSERT INTO foo SELECT * FROM foo;
- REFRESH MATERIALIZED VIEW mv;
- DROP TABLE foo CASCADE;
+ CREATE TABLE mvtest_foo(a, b) AS VALUES(1, 10);
+ CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo;
+ CREATE UNIQUE INDEX ON mvtest_mv(a);
+ INSERT INTO mvtest_foo SELECT * FROM mvtest_foo;
+ REFRESH MATERIALIZED VIEW mvtest_mv;
+ REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv;
+ DROP TABLE mvtest_foo CASCADE;
  
  -- make sure that all columns covered by unique indexes works
- CREATE TABLE foo(a, b, c) AS VALUES(1, 2, 3);
- CREATE MATERIALIZED VIEW mv AS SELECT * FROM foo;
- CREATE UNIQUE INDEX ON mv (a);
- CREATE UNIQUE INDEX ON mv (b);
- CREATE UNIQUE INDEX on mv (c);
- INSERT INTO foo VALUES(2, 3, 4);
- INSERT INTO foo VALUES(3, 4, 5);
- REFRESH MATERIALIZED VIEW mv;
- DROP TABLE foo CASCADE;
+ CREATE TABLE mvtest_foo(a, b, c) AS VALUES(1, 2, 3);
+ CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo;
+ CREATE UNIQUE INDEX ON mvtest_mv (a);
+ CREATE UNIQUE INDEX ON mvtest_mv (b);
+ CREATE UNIQUE INDEX on mvtest_mv (c);
+ INSERT INTO mvtest_foo VALUES(2, 3, 4);
+ INSERT INTO mvtest_foo VALUES(3, 4, 5);
+ REFRESH MATERIALIZED VIEW mvtest_mv;
 -REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv;
+ DROP TABLE mvtest_foo CASCADE;
  
  -- allow subquery to reference unpopulated matview if WITH NO DATA is specified
- CREATE MATERIALIZED VIEW mv1 AS SELECT 1 AS col1 WITH NO DATA;
- CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM mv1
-   WHERE col1 = (SELECT LEAST(col1) FROM mv1) WITH NO DATA;
- DROP MATERIALIZED VIEW mv1 CASCADE;
+ CREATE MATERIALIZED VIEW mvtest_mv1 AS SELECT 1 AS col1 WITH NO DATA;
+ CREATE MATERIALIZED VIEW mvtest_mv2 AS SELECT * FROM mvtest_mv1
+   WHERE col1 = (SELECT LEAST(col1) FROM mvtest_mv1) WITH NO DATA;
+ DROP MATERIALIZED VIEW mvtest_mv1 CASCADE;
  
  -- make sure that types with unusual equality tests work
- CREATE TABLE boxes (id serial primary key, b box);
- INSERT INTO boxes (b) VALUES
+ CREATE TABLE mvtest_boxes (id serial primary key, b box);
+ INSERT INTO mvtest_boxes (b) VALUES
    ('(32,32),(31,31)'),
    ('(2.0000004,2.0000004),(1,1)'),
    ('(1.9999996,1.9999996),(1,1)');
- CREATE MATERIALIZED VIEW boxmv AS SELECT * FROM boxes;
- CREATE UNIQUE INDEX boxmv_id ON boxmv (id);
- UPDATE boxes SET b = '(2,2),(1,1)' WHERE id = 2;
- SELECT * FROM boxmv ORDER BY id;
- DROP TABLE boxes CASCADE;
+ CREATE MATERIALIZED VIEW mvtest_boxmv AS SELECT * FROM mvtest_boxes;
+ CREATE UNIQUE INDEX mvtest_boxmv_id ON mvtest_boxmv (id);
+ UPDATE mvtest_boxes SET b = '(2,2),(1,1)' WHERE id = 2;
 -REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_boxmv;
+ SELECT * FROM mvtest_boxmv ORDER BY id;
+ DROP TABLE mvtest_boxes CASCADE;
  
  -- make sure that column names are handled correctly
- CREATE TABLE v (i int, j int);
- CREATE MATERIALIZED VIEW mv_v (ii) AS SELECT i, j AS jj FROM v;
- ALTER TABLE v RENAME COLUMN i TO x;
- INSERT INTO v values (1, 2);
- CREATE UNIQUE INDEX mv_v_ii ON mv_v (ii);
- REFRESH MATERIALIZED VIEW mv_v;
- UPDATE v SET j = 3 WHERE x = 1;
- SELECT * FROM v;
- SELECT * FROM mv_v;
- DROP TABLE v CASCADE;
+ CREATE TABLE mvtest_v (i int, j int);
+ CREATE MATERIALIZED VIEW mvtest_mv_v (ii, jj, kk) AS SELECT i, j FROM mvtest_v; -- error
+ CREATE MATERIALIZED VIEW mvtest_mv_v (ii, jj) AS SELECT i, j FROM mvtest_v; -- ok
+ CREATE MATERIALIZED VIEW mvtest_mv_v_2 (ii) AS SELECT i, j FROM mvtest_v; -- ok
+ CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj, kk) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- error
+ CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok
+ CREATE MATERIALIZED VIEW mvtest_mv_v_4 (ii) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok
+ ALTER TABLE mvtest_v RENAME COLUMN i TO x;
+ INSERT INTO mvtest_v values (1, 2);
+ CREATE UNIQUE INDEX mvtest_mv_v_ii ON mvtest_mv_v (ii);
+ REFRESH MATERIALIZED VIEW mvtest_mv_v;
+ UPDATE mvtest_v SET j = 3 WHERE x = 1;
 -REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_v;
+ REFRESH MATERIALIZED VIEW mvtest_mv_v_2;
+ REFRESH MATERIALIZED VIEW mvtest_mv_v_3;
+ REFRESH MATERIALIZED VIEW mvtest_mv_v_4;
+ SELECT * FROM mvtest_v;
+ SELECT * FROM mvtest_mv_v;
+ SELECT * FROM mvtest_mv_v_2;
+ SELECT * FROM mvtest_mv_v_3;
+ SELECT * FROM mvtest_mv_v_4;
+ DROP TABLE mvtest_v CASCADE;
+ -- make sure that create WITH NO DATA does not plan the query (bug #13907)
+ create materialized view mvtest_error as select 1/0 as x;  -- fail
+ create materialized view mvtest_error as select 1/0 as x with no data;
+ refresh materialized view mvtest_error;  -- fail here
+ drop materialized view mvtest_error;
  
  -- make sure that matview rows can be referenced as source rows (bug #9398)
- CREATE TABLE v AS SELECT generate_series(1,10) AS a;
- CREATE MATERIALIZED VIEW mv_v AS SELECT a FROM v WHERE a <= 5;
- DELETE FROM v WHERE EXISTS ( SELECT * FROM mv_v WHERE mv_v.a = v.a );
- SELECT * FROM v;
- SELECT * FROM mv_v;
- DROP TABLE v CASCADE;
+ CREATE TABLE mvtest_v AS SELECT generate_series(1,10) AS a;
+ CREATE MATERIALIZED VIEW mvtest_mv_v AS SELECT a FROM mvtest_v WHERE a <= 5;
+ DELETE FROM mvtest_v WHERE EXISTS ( SELECT * FROM mvtest_mv_v WHERE mvtest_mv_v.a = mvtest_v.a );
+ SELECT * FROM mvtest_v;
+ SELECT * FROM mvtest_mv_v;
+ DROP TABLE mvtest_v CASCADE;
  
  -- make sure running as superuser works when MV owned by another role (bug #11208)
- CREATE ROLE user_dw;
- SET ROLE user_dw;
- CREATE TABLE foo_data AS SELECT i, md5(random()::text)
+ CREATE ROLE regress_user_mvtest;
+ SET ROLE regress_user_mvtest;
+ CREATE TABLE mvtest_foo_data AS SELECT i, md5(random()::text)
    FROM generate_series(1, 10) i;
- CREATE MATERIALIZED VIEW mv_foo AS SELECT * FROM foo_data;
- CREATE MATERIALIZED VIEW mv_foo AS SELECT * FROM foo_data;
- CREATE MATERIALIZED VIEW IF NOT EXISTS mv_foo AS SELECT * FROM foo_data;
- CREATE UNIQUE INDEX ON mv_foo (i);
+ CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ CREATE MATERIALIZED VIEW IF NOT EXISTS mvtest_mv_foo AS SELECT * FROM mvtest_foo_data;
+ CREATE UNIQUE INDEX ON mvtest_mv_foo (i);
  RESET ROLE;
- REFRESH MATERIALIZED VIEW mv_foo;
- DROP OWNED BY user_dw CASCADE;
- DROP ROLE user_dw;
+ REFRESH MATERIALIZED VIEW mvtest_mv_foo;
 -REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_foo;
+ DROP OWNED BY regress_user_mvtest CASCADE;
+ DROP ROLE regress_user_mvtest;
+ -- make sure that create WITH NO DATA works via SPI
+ BEGIN;
+ CREATE FUNCTION mvtest_func()
+   RETURNS void AS $$
+ BEGIN
+   CREATE MATERIALIZED VIEW mvtest1 AS SELECT 1 AS x;
+   CREATE MATERIALIZED VIEW mvtest2 AS SELECT 1 AS x WITH NO DATA;
+ END;
+ $$ LANGUAGE plpgsql;
+ SELECT mvtest_func();
+ SELECT * FROM mvtest1;
+ SELECT * FROM mvtest2;
+ ROLLBACK;
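REFRESH MATERIALIZED VIEW CONCURRENTLY, which several spots in the hunk above drop, requires upstream a populated materialized view with at least one unique index on plain columns (no expression or partial indexes), which is why the surrounding tests create unique indexes before refreshing. A minimal sketch of that requirement, using hypothetical names mv_src and mv_demo:

CREATE TABLE mv_src (id int PRIMARY KEY, val text);
CREATE MATERIALIZED VIEW mv_demo AS SELECT id, val FROM mv_src;
CREATE UNIQUE INDEX mv_demo_id ON mv_demo (id);
REFRESH MATERIALIZED VIEW CONCURRENTLY mv_demo;  -- ok: populated, plain unique index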
index 0dd2fcd4fafb6c58dea8de40751b6f526c4cc1f7,602bf26a48c6894c19bf2e3745183c6a6fed07a2..66c80d9ecaf27149765c4970afbcc13103144fca
@@@ -35,20 -35,53 +35,53 @@@ SELECT '' AS six, c.f1 FROM NAME_TBL c 
  
  SELECT '' AS one, c.f1 FROM NAME_TBL c WHERE c.f1 = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
  
 -SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 < '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
 +SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 < '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR' ORDER BY f1; 
  
 -SELECT '' AS four, c.f1 FROM NAME_TBL c WHERE c.f1 <= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
 +SELECT '' AS four, c.f1 FROM NAME_TBL c WHERE c.f1 <= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR' ORDER BY f1;
  
 -SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 > '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
 +SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 > '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR' ORDER BY f1;
  
 -SELECT '' AS four, c.f1 FROM NAME_TBL c WHERE c.f1 >= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
 +SELECT '' AS four, c.f1 FROM NAME_TBL c WHERE c.f1 >= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR' ORDER BY f1;
  
 -SELECT '' AS seven, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*';
 +SELECT '' AS seven, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*' ORDER BY f1;
  
 -SELECT '' AS zero, c.f1 FROM NAME_TBL c WHERE c.f1 !~ '.*';
 +SELECT '' AS zero, c.f1 FROM NAME_TBL c WHERE c.f1 !~ '.*' ORDER BY f1;
  
 -SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '[0-9]';
 +SELECT '' AS three, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '[0-9]' ORDER BY f1;
  
 -SELECT '' AS two, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*asdf.*';
 +SELECT '' AS two, c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*asdf.*' ORDER BY f1;
  
  DROP TABLE NAME_TBL;
+ DO $$
+ DECLARE r text[];
+ BEGIN
+   r := parse_ident('Schemax.Tabley');
+   RAISE NOTICE '%', format('%I.%I', r[1], r[2]);
+   r := parse_ident('"SchemaX"."TableY"');
+   RAISE NOTICE '%', format('%I.%I', r[1], r[2]);
+ END;
+ $$;
+ SELECT parse_ident('foo.boo');
+ SELECT parse_ident('foo.boo[]'); -- should fail
+ SELECT parse_ident('foo.boo[]', strict => false); -- ok
+ -- should fail
+ SELECT parse_ident(' ');
+ SELECT parse_ident(' .aaa');
+ SELECT parse_ident(' aaa . ');
+ SELECT parse_ident('aaa.a%b');
+ SELECT parse_ident(E'X\rXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX');
+ SELECT length(a[1]), length(a[2]) from parse_ident('"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx".yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy') as a ;
+ SELECT parse_ident(' first . "  second  " ."   third   ". "  ' || repeat('x',66) || '"');
+ SELECT parse_ident(' first . "  second  " ."   third   ". "  ' || repeat('x',66) || '"')::name[];
+ SELECT parse_ident(E'"c".X XXXX\002XXXXXX');
+ SELECT parse_ident('1020');
+ SELECT parse_ident('10.20');
+ SELECT parse_ident('.');
+ SELECT parse_ident('.1020');
+ SELECT parse_ident('xxx.1020');
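parse_ident(), exercised at the end of the hunk above, splits a possibly qualified identifier into its parts as text[], applying the parser's down-casing and de-quoting rules; with strict => false it stops at the first character that cannot continue an identifier instead of raising an error. A short illustrative sketch:

SELECT parse_ident('"My Schema".tbl');             -- {"My Schema",tbl}
SELECT parse_ident('foo.boo[]', strict => false);  -- {foo,boo}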
index 0eaaf99e55454249ae531c0bfe32c30fcaf97600,fc472187d873d9cbb9fb34e696b12289bb21c9ae..d8aef9769e9d277017862cfb1d977a2caccf97a2
@@@ -664,9 -664,19 +664,19 @@@ INSERT INTO ceil_floor_round VALUES ('9
  INSERT INTO ceil_floor_round VALUES ('0.0');
  INSERT INTO ceil_floor_round VALUES ('0.0000001');
  INSERT INTO ceil_floor_round VALUES ('-0.000001');
 -SELECT a, ceil(a), ceiling(a), floor(a), round(a) FROM ceil_floor_round;
 +SELECT a, ceil(a), ceiling(a), floor(a), round(a) FROM ceil_floor_round ORDER BY a;
  DROP TABLE ceil_floor_round;
  
+ -- Check rounding, it should round ties away from zero.
+ SELECT i as pow,
+       round((-2.5 * 10 ^ i)::numeric, -i),
+       round((-1.5 * 10 ^ i)::numeric, -i),
+       round((-0.5 * 10 ^ i)::numeric, -i),
+       round((0.5 * 10 ^ i)::numeric, -i),
+       round((1.5 * 10 ^ i)::numeric, -i),
+       round((2.5 * 10 ^ i)::numeric, -i)
+ FROM generate_series(-5,5) AS t(i);
  -- Testing for width_bucket(). For convenience, we test both the
  -- numeric and float8 versions of the function in this file.
  
@@@ -809,8 -819,20 +819,20 @@@ INSERT INTO num_input_test(n1) VALUES (
  INSERT INTO num_input_test(n1) VALUES ('');
  INSERT INTO num_input_test(n1) VALUES (' N aN ');
  
 -SELECT * FROM num_input_test;
 +SELECT * FROM num_input_test ORDER BY n1;
  
+ --
+ -- Test some corner cases for multiplication
+ --
+ select 4790999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+ select 4789999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+ select 4770999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+ select 4769999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
  --
  -- Test some corner cases for division
  --
Simple merge
index 750f80c8bc3daef6c09d7aeda6a7a855fdef1389,776f2292ea59a4f3b3b2a92db50d9ce96902ac1a..c6c0bac9cae1177911a66790bba5788ad4111d1f
@@@ -4154,90 -4237,49 +4242,131 @@@ select testoa(1,2,1); -- fail at updat
  drop function arrayassign1();
  drop function testoa(x1 int, x2 int, x3 int);
  
 +-- Check that DMLs in a plpgsql function work OK, when subsequent queries need
 +-- to open new datanode connections
 +CREATE OR REPLACE FUNCTION TestJoinTempTable_CT()
 +RETURNS void AS $$
 +BEGIN
 +        CREATE TABLE IF NOT EXISTS RealTable(ProductId int, ScenarioId int);
 +        TRUNCATE TABLE RealTable;
 +
 +        CREATE TABLE IF NOT EXISTS TmpBar(NodeId int)
 +                DISTRIBUTE BY REPLICATION;
 +        CREATE TABLE IF NOT EXISTS TmpFoo(TempId int)
 +                DISTRIBUTE BY REPLICATION;
 +END ;
 +$$ LANGUAGE plpgsql;
 +
 +CREATE OR REPLACE FUNCTION TestJoinTempTable_INSERT()
 +RETURNS void AS $$
 +BEGIN
 +        INSERT INTO RealTable(ProductId, ScenarioId)
 +                SELECT generate_series(1,1000) as ProductId, (random() * 100)::int as ScenarioId;
 +
 +        INSERT INTO TmpBar(NodeId)
 +                SELECT generate_series(1,1000);
 +              RAISE INFO 'number of existing rows in RealTable - %', (SELECT count(*) FROM RealTable);
 +              RAISE INFO 'number of existing rows in TmpBar - %', (SELECT count(*) FROM TmpBar);
 +              RAISE INFO 'number of existing rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
 +        INSERT INTO TmpFoo(TempId)
 +               SELECT DISTINCT(PR.ProductId)
 +                 FROM RealTable AS PR
 +                 JOIN TmpBar tmp1 ON PR.ProductId = tmp1.NodeId;
 +
 +              RAISE INFO 'number of rows produced by query - %',
 +                              (SELECT COUNT(DISTINCT(PR.ProductId))
 +                               FROM RealTable AS PR
 +                 JOIN TmpBar tmp1 ON PR.ProductId = tmp1.NodeId);
 +              RAISE INFO 'number of rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
 +              RAISE INFO 'number of existing rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
 +              RAISE INFO 'number of existing rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
 +END ;
 +$$ LANGUAGE plpgsql;
 +
 +SELECT TestJoinTempTable_CT();
 +SELECT TestJoinTempTable_INSERT();
 +
 +DROP TABLE RealTable;
 +DROP TABLE TmpBar;
 +DROP TABLE TmpFoo;
 +
 +CREATE OR REPLACE FUNCTION TestJoinTempTable()
 +RETURNS void AS $$
 +BEGIN
 +        CREATE TABLE IF NOT EXISTS RealTable(ProductId int, ScenarioId int);
 +        TRUNCATE TABLE RealTable;
 +
 +        CREATE TEMPORARY TABLE IF NOT EXISTS TmpBar(NodeId int)
 +                DISTRIBUTE BY REPLICATION;
 +        CREATE TEMPORARY TABLE IF NOT EXISTS TmpFoo(TempId int)
 +                DISTRIBUTE BY REPLICATION;
 +
 +        INSERT INTO RealTable(ProductId, ScenarioId)
 +                SELECT generate_series(1,1000) as ProductId, (random() * 100)::int as ScenarioId;
 +
 +        INSERT INTO TmpBar(NodeId)
 +                SELECT generate_series(1,1000);
 +
 +        INSERT INTO TmpFoo(TempId)
 +               SELECT DISTINCT(PR.ProductId)
 +                 FROM RealTable AS PR
 +                 JOIN TmpBar tmp1 ON PR.ProductId = tmp1.NodeId;
 +END ;
 +$$ LANGUAGE plpgsql;
 +
 +SELECT TestJoinTempTable();
 +
 +-- Multiple invocations of the function showed interesting issues with command
 +-- passdown. So add that to the test case
 +SELECT TestJoinTempTable();
 +SELECT TestJoinTempTable();
 +
 +DROP TABLE RealTable;
 +DROP TABLE TmpBar;
 +DROP TABLE TmpFoo;
  
- -- access to call stack
+ --
+ -- Test handling of expanded arrays
+ --
+ create function returns_rw_array(int) returns int[]
+ language plpgsql as $$
+   declare r int[];
+   begin r := array[$1, $1]; return r; end;
+ $$ stable;
+ create function consumes_rw_array(int[]) returns int
+ language plpgsql as $$
+   begin return $1[1]; end;
+ $$ stable;
+ -- bug #14174
+ explain (verbose, costs off)
+ select i, a from
+   (select returns_rw_array(1) as a offset 0) ss,
+   lateral consumes_rw_array(a) i;
+ select i, a from
+   (select returns_rw_array(1) as a offset 0) ss,
+   lateral consumes_rw_array(a) i;
+ explain (verbose, costs off)
+ select consumes_rw_array(a), a from returns_rw_array(1) a;
+ select consumes_rw_array(a), a from returns_rw_array(1) a;
+ explain (verbose, costs off)
+ select consumes_rw_array(a), a from
+   (values (returns_rw_array(1)), (returns_rw_array(2))) v(a);
+ select consumes_rw_array(a), a from
+   (values (returns_rw_array(1)), (returns_rw_array(2))) v(a);
+ --
+ -- Test access to call stack
+ --
  create function inner_func(int)
  returns int as $$
  declare _context text;
Simple merge
index b1b6201e7c62f1f4035d8f6e64c48092c00209de,dfb20a18e7c972367e82e33536c3a2e988cb7788..b868985958c7380e787417e6beca61f2239926e2
@@@ -130,27 -119,25 +130,29 @@@ FETCH 1 FROM foo
  SELECT * FROM pxtest2;
  
  -- There should be two prepared transactions
 -SELECT gid FROM pg_prepared_xacts;
 +SELECT gid FROM pg_prepared_xacts ORDER BY gid;
 +-- Check prepared transactions in the cluster
 +SELECT pgxc_prepared_xact FROM pgxc_prepared_xacts ORDER by 1;
  
  -- pxtest3 should be locked because of the pending DROP
+ begin;
  set statement_timeout to 2000;
  SELECT * FROM pxtest3;
- reset statement_timeout;
+ rollback;
  
  -- Disconnect, we will continue testing in a different backend
  \c -
  
  -- There should still be two prepared transactions
 -SELECT gid FROM pg_prepared_xacts;
 +SELECT gid FROM pg_prepared_xacts ORDER BY gid;
 +-- Check prepared transactions in the cluster
 +SELECT pgxc_prepared_xact FROM pgxc_prepared_xacts ORDER by 1;
  
  -- pxtest3 should still be locked because of the pending DROP
+ begin;
  set statement_timeout to 2000;
  SELECT * FROM pxtest3;
- reset statement_timeout;
+ rollback;
  
  -- Commit table creation
  COMMIT PREPARED 'regress-one';
index 2db3b94962c4e6c98e00e69d8512360f082a0395,0aa9c672d554a049f5d2a05bcfeaefe2977e397f..f89dfa5e5cda1dac853b3c73ac3fd00651979437
@@@ -95,10 -95,10 +95,10 @@@ SELECT * FROM atest1 WHERE ( b IN ( SEL
  SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) );
  
  
- SET SESSION AUTHORIZATION regressuser3;
+ SET SESSION AUTHORIZATION regress_user3;
  SELECT session_user, current_user;
  
 -SELECT * FROM atest1; -- ok
 +SELECT * FROM atest1 ORDER BY 1; -- ok
  SELECT * FROM atest2; -- fail
  INSERT INTO atest1 VALUES (2, 'two'); -- fail
  INSERT INTO atest2 VALUES ('foo', true); -- fail
@@@ -178,19 -178,18 +178,19 @@@ where x < 0
  reset constraint_exclusion;
  
  CREATE VIEW atestv4 AS SELECT * FROM atestv3; -- nested view
 -SELECT * FROM atestv4; -- ok
 +SELECT * FROM atestv4; -- fail due to issue 3520503, see above
- GRANT SELECT ON atestv4 TO regressuser2;
+ GRANT SELECT ON atestv4 TO regress_user2;
  
- SET SESSION AUTHORIZATION regressuser2;
+ SET SESSION AUTHORIZATION regress_user2;
  
  -- Two complex cases:
  
  SELECT * FROM atestv3; -- fail
- SELECT * FROM atestv4; -- ok (even though regressuser2 cannot access underlying atestv3)
 +-- fail due to issue 3520503, see above
+ SELECT * FROM atestv4; -- ok (even though regress_user2 cannot access underlying atestv3)
  
  SELECT * FROM atest2; -- ok
- SELECT * FROM atestv2; -- fail (even though regressuser2 can access underlying atest2)
+ SELECT * FROM atestv2; -- fail (even though regress_user2 can access underlying atest2)
  
  -- Test column level permissions
  
@@@ -218,25 -217,25 +218,25 @@@ SELECT (j.*) IS NULL FROM (atest5 a JOI
  SELECT 1 FROM atest5 WHERE two = 2; -- fail
  SELECT * FROM atest1, atest5; -- fail
  SELECT atest1.* FROM atest1, atest5; -- ok
 -SELECT atest1.*,atest5.one FROM atest1, atest5; -- ok
 +SELECT atest1.*,atest5.one FROM atest1, atest5; -- ok 
  SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.two); -- fail
 -SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.one); -- ok
 +SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.one); -- ok 
  SELECT one, two FROM atest5; -- fail
  
- SET SESSION AUTHORIZATION regressuser1;
- GRANT SELECT (one,two) ON atest6 TO regressuser4;
+ SET SESSION AUTHORIZATION regress_user1;
+ GRANT SELECT (one,two) ON atest6 TO regress_user4;
  
- SET SESSION AUTHORIZATION regressuser4;
+ SET SESSION AUTHORIZATION regress_user4;
  SELECT one, two FROM atest5 NATURAL JOIN atest6; -- fail still
  
- SET SESSION AUTHORIZATION regressuser1;
- GRANT SELECT (two) ON atest5 TO regressuser4;
+ SET SESSION AUTHORIZATION regress_user1;
+ GRANT SELECT (two) ON atest5 TO regress_user4;
  
- SET SESSION AUTHORIZATION regressuser4;
- SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now 
+ SET SESSION AUTHORIZATION regress_user4;
+ SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now
  
  -- test column-level privileges for INSERT and UPDATE
 -INSERT INTO atest5 (two) VALUES (3); -- ok
 +INSERT INTO atest5 (two) VALUES (3); -- fail due to issue 3520503, see above
  COPY atest5 FROM stdin; -- fail
  COPY atest5 (two) FROM stdin; -- ok
  1
@@@ -311,18 -310,18 +311,18 @@@ SET SESSION AUTHORIZATION regress_user4
  SELECT atest6 FROM atest6; -- fail
  SELECT one FROM atest5 NATURAL JOIN atest6; -- fail
  
- SET SESSION AUTHORIZATION regressuser1;
+ SET SESSION AUTHORIZATION regress_user1;
  ALTER TABLE atest6 DROP COLUMN three;
  
- SET SESSION AUTHORIZATION regressuser4;
+ SET SESSION AUTHORIZATION regress_user4;
  SELECT atest6 FROM atest6; -- ok
 -SELECT one FROM atest5 NATURAL JOIN atest6; -- ok
 +SELECT one FROM atest5 NATURAL JOIN atest6; -- ok 
  
- SET SESSION AUTHORIZATION regressuser1;
+ SET SESSION AUTHORIZATION regress_user1;
  ALTER TABLE atest6 DROP COLUMN two;
- REVOKE SELECT (one,blue) ON atest6 FROM regressuser4;
+ REVOKE SELECT (one,blue) ON atest6 FROM regress_user4;
  
- SET SESSION AUTHORIZATION regressuser4;
+ SET SESSION AUTHORIZATION regress_user4;
  SELECT * FROM atest6; -- fail
  SELECT 1 FROM atest6; -- fail
  
@@@ -335,23 -334,23 +335,23 @@@ SET SESSION AUTHORIZATION regress_user1
  CREATE TABLE atestp1 (f1 int, f2 int) WITH OIDS;
  CREATE TABLE atestp2 (fx int, fy int) WITH OIDS;
  CREATE TABLE atestc (fz int) INHERITS (atestp1, atestp2);
- GRANT SELECT(fx,fy,oid) ON atestp2 TO regressuser2;
- GRANT SELECT(fx) ON atestc TO regressuser2;
+ GRANT SELECT(fx,fy,oid) ON atestp2 TO regress_user2;
+ GRANT SELECT(fx) ON atestc TO regress_user2;
  
- SET SESSION AUTHORIZATION regressuser2;
+ SET SESSION AUTHORIZATION regress_user2;
  SELECT fx FROM atestp2; -- ok
 -SELECT fy FROM atestp2; -- ok
 -SELECT atestp2 FROM atestp2; -- ok
 -SELECT oid FROM atestp2; -- ok
 +SELECT fy FROM atestp2; -- fail due to issue 3520503, see above
 +SELECT atestp2 FROM atestp2; -- fail due to issue 3520503, see above
 +SELECT oid FROM atestp2; -- fail due to issue 3520503, see above
  SELECT fy FROM atestc; -- fail
  
- SET SESSION AUTHORIZATION regressuser1;
- GRANT SELECT(fy,oid) ON atestc TO regressuser2;
+ SET SESSION AUTHORIZATION regress_user1;
+ GRANT SELECT(fy,oid) ON atestc TO regress_user2;
  
- SET SESSION AUTHORIZATION regressuser2;
+ SET SESSION AUTHORIZATION regress_user2;
  SELECT fx FROM atestp2; -- still ok
  SELECT fy FROM atestp2; -- ok
 -SELECT atestp2 FROM atestp2; -- ok
 +SELECT atestp2 FROM atestp2; -- fail due to issue 3520503, see above
  SELECT oid FROM atestp2; -- ok
  
  -- privileges on functions, languages
@@@ -383,12 -382,12 +383,12 @@@ SET SESSION AUTHORIZATION regress_user2
  SELECT testfunc1(5), testfunc2(5); -- ok
  CREATE FUNCTION testfunc3(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -- fail
  
- SET SESSION AUTHORIZATION regressuser3;
+ SET SESSION AUTHORIZATION regress_user3;
  SELECT testfunc1(5); -- fail
  SELECT col1 FROM atest2 WHERE col2 = true; -- fail
 -SELECT testfunc4(true); -- ok
 +SELECT testfunc4(true); -- fail due to issue 3520503, see above
  
- SET SESSION AUTHORIZATION regressuser4;
+ SET SESSION AUTHORIZATION regress_user4;
  SELECT testfunc1(5); -- ok
  
  DROP FUNCTION testfunc1(int); -- fail
Simple merge
Simple merge
index 880c54b77c8de694a79f5347ca9f6a8f0f5b0fd4,7fcefe45026f665ce7e2d68ba83b15155188b461..00a64ac83d04c8599b735ad7d6a280b9d8de267b
@@@ -48,15 -48,11 +48,15 @@@ CREATE TABLE uaccount 
  );
  GRANT SELECT ON uaccount TO public;
  INSERT INTO uaccount VALUES
-     ('rls_regress_user0', 99),
-     ('rls_regress_user1', 1),
-     ('rls_regress_user2', 2),
-     ('rls_regress_user3', 3);
+     ('regress_rls_alice', 99),
+     ('regress_rls_bob', 1),
+     ('regress_rls_carol', 2),
+     ('regress_rls_dave', 3);
  
 +-- PGXL
 +--   Distribute by replication so that "document" table below can reference "cid"
 +--   column
 +--
  CREATE TABLE category (
      cid        int primary key,
      cname      text
Simple merge
Simple merge
Simple merge
Simple merge
index d55aa13bdcc6fdc21844a8eb4f7bf68d3a133d1a,b7957cbb9aa85927972410bc003e9d173d1a34a6..41c858eee61a80b6ff13af85a2cfa0ff52c5d410
@@@ -141,29 -141,34 +141,34 @@@ INSERT INTO TIMESTAMP_TBL VALUES ('Jan 
  INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 -0097');
  INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC');
  
 -SELECT '' AS "64", d1 FROM TIMESTAMP_TBL;
 +SELECT '' AS "64", d1 FROM TIMESTAMP_TBL ORDER BY d1; 
  
+ -- Check behavior at the lower boundary of the timestamp range
+ SELECT '4714-11-24 00:00:00 BC'::timestamp;
+ SELECT '4714-11-23 23:59:59 BC'::timestamp;  -- out of range
+ -- The upper boundary differs between integer and float timestamps, so no check
  -- Demonstrate functions and operators
  SELECT '' AS "48", d1 FROM TIMESTAMP_TBL
 -   WHERE d1 > timestamp without time zone '1997-01-02';
 +   WHERE d1 > timestamp without time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "15", d1 FROM TIMESTAMP_TBL
 -   WHERE d1 < timestamp without time zone '1997-01-02';
 +   WHERE d1 < timestamp without time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS one, d1 FROM TIMESTAMP_TBL
 -   WHERE d1 = timestamp without time zone '1997-01-02';
 +   WHERE d1 = timestamp without time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "63", d1 FROM TIMESTAMP_TBL
 -   WHERE d1 != timestamp without time zone '1997-01-02';
 +   WHERE d1 != timestamp without time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "16", d1 FROM TIMESTAMP_TBL
 -   WHERE d1 <= timestamp without time zone '1997-01-02';
 +   WHERE d1 <= timestamp without time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "49", d1 FROM TIMESTAMP_TBL
 -   WHERE d1 >= timestamp without time zone '1997-01-02';
 +   WHERE d1 >= timestamp without time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "54", d1 - timestamp without time zone '1997-01-02' AS diff
 -   FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
 +   FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01' ORDER BY d1;
  
  SELECT '' AS date_trunc_week, date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc;
  
index f1cd872242f9d9e5bd76c5291dd600bde7d5ae64,c023095bb89b4bf5df74431d0f624020d403ca57..8c16917127c137ba4e776109ec083d204ae9ad08
@@@ -160,29 -160,36 +160,36 @@@ SELECT 'Wed Jul 11 10:51:14 GMT+4 2001'
  SELECT 'Wed Jul 11 10:51:14 PST-03:00 2001'::timestamptz;
  SELECT 'Wed Jul 11 10:51:14 PST+03:00 2001'::timestamptz;
  
 -SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL;
 +SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL ORDER BY d1;
  
+ -- Check behavior at the lower boundary of the timestamp range
+ SELECT '4714-11-24 00:00:00+00 BC'::timestamptz;
+ SELECT '4714-11-23 16:00:00-08 BC'::timestamptz;
+ SELECT 'Sun Nov 23 16:00:00 4714 PST BC'::timestamptz;
+ SELECT '4714-11-23 23:59:59+00 BC'::timestamptz;  -- out of range
+ -- The upper boundary differs between integer and float timestamps, so no check
  -- Demonstrate functions and operators
  SELECT '' AS "48", d1 FROM TIMESTAMPTZ_TBL
 -   WHERE d1 > timestamp with time zone '1997-01-02';
 +   WHERE d1 > timestamp with time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "15", d1 FROM TIMESTAMPTZ_TBL
 -   WHERE d1 < timestamp with time zone '1997-01-02';
 +   WHERE d1 < timestamp with time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS one, d1 FROM TIMESTAMPTZ_TBL
 -   WHERE d1 = timestamp with time zone '1997-01-02';
 +   WHERE d1 = timestamp with time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "63", d1 FROM TIMESTAMPTZ_TBL
 -   WHERE d1 != timestamp with time zone '1997-01-02';
 +   WHERE d1 != timestamp with time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "16", d1 FROM TIMESTAMPTZ_TBL
 -   WHERE d1 <= timestamp with time zone '1997-01-02';
 +   WHERE d1 <= timestamp with time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "49", d1 FROM TIMESTAMPTZ_TBL
 -   WHERE d1 >= timestamp with time zone '1997-01-02';
 +   WHERE d1 >= timestamp with time zone '1997-01-02' ORDER BY d1;
  
  SELECT '' AS "54", d1 - timestamp with time zone '1997-01-02' AS diff
 -   FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
 +   FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01' ORDER BY d1;
  
  SELECT '' AS date_trunc_week, date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS week_trunc;
  
@@@ -205,42 -212,59 +212,59 @@@ SELECT '' AS "54", d1 as timestamptz
  SELECT '' AS "54", d1 as timestamptz,
     date_part( 'isoyear', d1) AS isoyear, date_part( 'week', d1) AS week,
     date_part( 'dow', d1) AS dow
 -   FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
 +   FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01' ORDER BY d1;
  
  -- TO_CHAR()
 -SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
 -   FROM TIMESTAMPTZ_TBL;
 -
 +SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') 
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;
 +      
  SELECT '' AS to_char_2, to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM')
 -   FROM TIMESTAMPTZ_TBL;
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;  
  
  SELECT '' AS to_char_3, to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J')
 -   FROM TIMESTAMPTZ_TBL;
 -
 -SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
 -   FROM TIMESTAMPTZ_TBL;
 -
 -SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
 -   FROM TIMESTAMPTZ_TBL;
 -
 -SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
 -   FROM TIMESTAMPTZ_TBL;
 -
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;
 +      
 +SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') 
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;  
 +      
 +SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') 
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;
 +
 +SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') 
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;          
 +              
  SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS')
 -   FROM TIMESTAMPTZ_TBL;
 -
 -SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
 -   FROM TIMESTAMPTZ_TBL;
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;          
  
 -SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
 -   FROM TIMESTAMPTZ_TBL;
 +SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') 
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;
 +  
 +SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') 
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;   
  
  SELECT '' AS to_char_10, to_char(d1, 'IYYY IYY IY I IW IDDD ID')
 -   FROM TIMESTAMPTZ_TBL;
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;
  
  SELECT '' AS to_char_11, to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID')
 -   FROM TIMESTAMPTZ_TBL;
 +   FROM TIMESTAMPTZ_TBL ORDER BY d1;
  
+ -- Check OF with various zone offsets, particularly fractional hours
+ SET timezone = '00:00';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '+02:00';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '-13:00';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '-00:30';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '00:30';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '-04:30';
+ SELECT to_char(now(), 'OF');
+ SET timezone = '04:30';
+ SELECT to_char(now(), 'OF');
+ RESET timezone;
  CREATE TABLE TIMESTAMPTZ_TST (a int , b timestamptz);
  
  -- Test year field value with len > 4
Simple merge
index 1dc2632c364256cf09d608130a16ce4b466142b4,65eb9438d4941212552c94ec05356ebd0c3ead5f..57fe2d438fc8e1c8e15106cc8723bf5bf99a2faa
@@@ -89,13 -89,13 +89,13 @@@ SELECT * FROM ts_stat('SELECT a FROM te
  SELECT ts_lexize('english_stem', 'skies');
  SELECT ts_lexize('english_stem', 'identity');
  
 -SELECT * FROM ts_token_type('default');
 +SELECT * FROM ts_token_type('default') ORDER BY tokid;
  
- SELECT * FROM ts_parse('default', '345 [email protected] '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/?  ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 [email protected] qwe-wer asdf <fr>qwer jf sdjk<we hjwer <werrwe> ewr1> ewri2 <a href="qwe<qwe>">
+ SELECT * FROM ts_parse('default', '345 [email protected] '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/?  ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 [email protected] [email protected] [email protected] [email protected] qwe-wer asdf <fr>qwer jf sdjk<we hjwer <werrwe> ewr1> ewri2 <a href="qwe<qwe>">
  /usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234
 -<i <b> wow  < jqw <> qwerty');
 +<i <b> wow  < jqw <> qwerty') ORDER BY tokid,token;
  
- SELECT to_tsvector('english', '345 [email protected] '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/?  ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 [email protected] qwe-wer asdf <fr>qwer jf sdjk<we hjwer <werrwe> ewr1> ewri2 <a href="qwe<qwe>">
+ SELECT to_tsvector('english', '345 [email protected] '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/?  ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 [email protected] [email protected] [email protected] [email protected] qwe-wer asdf <fr>qwer jf sdjk<we hjwer <werrwe> ewr1> ewri2 <a href="qwe<qwe>">
  /usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234
  <i <b> wow  < jqw <> qwerty');
  
@@@ -383,8 -479,15 +479,16 @@@ insert into pendtest values (to_tsvecto
  insert into pendtest values (to_tsvector('Lore ipsum'));
  select * from pendtest where 'ipsu:*'::tsquery @@ ts;
  select * from pendtest where 'ipsa:*'::tsquery @@ ts;
 -select * from pendtest where 'ips:*'::tsquery @@ ts;
 +select * from pendtest where 'ips:*'::tsquery @@ ts ORDER BY 1;
  select * from pendtest where 'ipt:*'::tsquery @@ ts;
  select * from pendtest where 'ipi:*'::tsquery @@ ts;
 +drop table pendtest;
  
+ -- check OP_PHRASE on index
+ create temp table phrase_index_test(fts tsvector);
+ insert into phrase_index_test values ('A fat cat has just eaten a rat.');
+ insert into phrase_index_test values (to_tsvector('english', 'A fat cat has just eaten a rat.'));
+ create index phrase_index_test_idx on phrase_index_test using gin(fts);
+ set enable_seqscan = off;
+ select * from phrase_index_test where fts @@ phraseto_tsquery('english', 'fat cat');
+ set enable_seqscan = on;
index 4a1370bd23a6de8d1aa17b0a06ba55df93703bcd,03c3f9d35ebe3207cb5fbd78e0ce23874500f09f..091373db41526d0c36049d2797e121aadb3dc9e8
@@@ -391,11 -391,11 +391,11 @@@ DROP TABLE base_tbl CASCADE
  
  -- permissions checks
  
- CREATE USER view_user1;
- CREATE USER view_user2;
+ CREATE USER regress_view_user1;
+ CREATE USER regress_view_user2;
  
- SET SESSION AUTHORIZATION view_user1;
+ SET SESSION AUTHORIZATION regress_view_user1;
 -CREATE TABLE base_tbl(a int, b text, c float);
 +CREATE TABLE base_tbl(a int, b text, c float) DISTRIBUTE BY REPLICATION;
  INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
  CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl;
  INSERT INTO rw_view1 VALUES ('Row 2', 2.0, 2);
Simple merge
Simple merge
Simple merge