#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
- # Generated by GNU Autoconf 2.69 for PostgreSQL 10.3 (Postgres-XL 10r1beta1).
 -# Generated by GNU Autoconf 2.69 for PostgreSQL 10.5.
++# Generated by GNU Autoconf 2.69 for PostgreSQL 10.5 (Postgres-XL 10r1beta1).
#
 -# Report bugs to <pgsql-bugs@postgresql.org>.
 +# Report bugs to <bugs@postgres-xl.org>.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
# Identity of this package.
PACKAGE_NAME='PostgreSQL'
PACKAGE_TARNAME='postgresql'
- PACKAGE_VERSION='10.3 (Postgres-XL 10r1beta1)'
 -PACKAGE_VERSION='10.5'
 -PACKAGE_STRING='PostgreSQL 10.5'
++PACKAGE_VERSION='10.5 (Postgres-XL 10r1beta1)'
 +PACKAGE_XC_VERSION='10r1beta1'
- PACKAGE_STRING='PostgreSQL 10.3 (Postgres-XL 10r1beta1)'
++PACKAGE_STRING='PostgreSQL 10.5 (Postgres-XL 10r1beta1)'
PACKAGE_URL=''
ac_unique_file="src/backend/access/common/heaptuple.c"
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
- \`configure' configures PostgreSQL 10.3 (Postgres-XL 10r1beta1) to adapt to many kinds of systems.
 -\`configure' configures PostgreSQL 10.5 to adapt to many kinds of systems.
++\`configure' configures PostgreSQL 10.5 (Postgres-XL 10r1beta1) to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of PostgreSQL 10.3 (Postgres-XL 10r1beta1):";;
 - short | recursive ) echo "Configuration of PostgreSQL 10.5:";;
++ short | recursive ) echo "Configuration of PostgreSQL 10.5 (Postgres-XL 10r1beta1):";;
esac
cat <<\_ACEOF
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
- PostgreSQL configure 10.3 (Postgres-XL 10r1beta1)
 -PostgreSQL configure 10.5
++PostgreSQL configure 10.5 (Postgres-XL 10r1beta1)
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
- It was created by PostgreSQL $as_me 10.3 (Postgres-XL 10r1beta1), which was
 -It was created by PostgreSQL $as_me 10.5, which was
++It was created by PostgreSQL $as_me 10.5 (Postgres-XL 10r1beta1), which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
- This file was extended by PostgreSQL $as_me 10.3 (Postgres-XL 10r1beta1), which was
 -This file was extended by PostgreSQL $as_me 10.5, which was
++This file was extended by PostgreSQL $as_me 10.5 (Postgres-XL 10r1beta1), which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
- PostgreSQL config.status 10.3 (Postgres-XL 10r1beta1)
 -PostgreSQL config.status 10.5
++PostgreSQL config.status 10.5 (Postgres-XL 10r1beta1)
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
dnl
m4_pattern_forbid(^PGAC_)dnl to catch undefined macros
m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required.
Untested combinations of 'autoconf' and PostgreSQL versions are not
Operating System (example: Linux 2.4.18) :
- PostgreSQL version (example: PostgreSQL 10.3): Postgres-XL 10r1beta1
 - PostgreSQL version (example: PostgreSQL 10.5): PostgreSQL 10.5
++ PostgreSQL version (example: PostgreSQL 10.5): Postgres-XL 10r1beta1
Compiler used (example: gcc 3.3.5) :
LD = @LD@
with_gnu_ld = @with_gnu_ld@
- # We want -L for libpgport.a and libpgcommon.a to be first in LDFLAGS. We
- # also need LDFLAGS to be a "recursively expanded" variable, else adjustments
- # to rpathdir don't work right. So we must NOT do LDFLAGS := something,
- # meaning this has to be done first and elsewhere we must only do LDFLAGS +=
- # something.
+ # It's critical that within LDFLAGS, all -L switches pointing to build-tree
+ # directories come before any -L switches pointing to external directories.
+ # Otherwise it's possible for, e.g., a platform-provided copy of libpq.so
+ # to get linked in place of the one we've built. Therefore we adopt the
+ # convention that the first component of LDFLAGS is an extra variable
+ # LDFLAGS_INTERNAL, and -L and -l switches for PG's own libraries must be
+ # put into LDFLAGS_INTERNAL, so they will appear ahead of those for external
+ # libraries.
+ #
+ # We need LDFLAGS and LDFLAGS_INTERNAL to be "recursively expanded" variables,
+ # else adjustments to, e.g., rpathdir don't work right. So we must NOT do
+ # "LDFLAGS := something" anywhere, ditto for LDFLAGS_INTERNAL.
+ # These initial assignments must be "=" type, and elsewhere we must only do
+ # "LDFLAGS += something" or "LDFLAGS_INTERNAL += something".
+# PGXC_BEGIN
ifdef PGXS
- LDFLAGS = -L$(libdir)
+ LDFLAGS_INTERNAL = -L$(libdir)
else
- LDFLAGS = -L$(top_builddir)/src/port -L$(top_builddir)/src/common
+ LDFLAGS_INTERNAL = -L$(top_builddir)/src/port -L$(top_builddir)/src/common
endif
- LDFLAGS += @LDFLAGS@
+# PGXC_END
+ LDFLAGS = $(LDFLAGS_INTERNAL) @LDFLAGS@
LDFLAGS_EX = @LDFLAGS_EX@
# LDFLAGS_SL might have already been assigned by calling makefile
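
The LDFLAGS_INTERNAL convention spelled out in the comment block above is
easiest to see from the consumer side. A minimal sketch of a makefile
fragment that follows it; the library and path names are invented for
illustration, not taken from this patch:

    # Sketch: -L/-l switches for PG's own build-tree libraries go into
    # LDFLAGS_INTERNAL, so they expand ahead of everything else in LDFLAGS
    # and the freshly built libraries always win.
    LDFLAGS_INTERNAL += -L$(top_builddir)/src/interfaces/libpq -lpq
    # External libraries are appended to LDFLAGS and therefore resolve
    # after the build-tree directories, never shadowing our own libpq.
    LDFLAGS += -L/usr/local/lib -lxml2
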
AtEOSubXact_HashTables(true, s->nestingLevel);
AtEOSubXact_PgStat(true, s->nestingLevel);
AtSubCommit_Snapshot(s->nestingLevel);
+#ifdef XCP
+ AtSubCommit_WaitedXids();
+#endif
+ AtEOSubXact_ApplyLauncher(true, s->nestingLevel);
/*
* We need to restore the upper transaction's read-only state, in case the
AtEOSubXact_HashTables(false, s->nestingLevel);
AtEOSubXact_PgStat(false, s->nestingLevel);
AtSubAbort_Snapshot(s->nestingLevel);
+#ifdef XCP
+ AtSubAbort_WaitedXids();
+#endif
+ AtEOSubXact_ApplyLauncher(false, s->nestingLevel);
}
/*
* tuples inserted by an INSERT command.
*/
processed++;
+ }
- if (saved_resultRelInfo)
- {
- resultRelInfo = saved_resultRelInfo;
- estate->es_result_relation_info = resultRelInfo;
- }
+ /* Restore the saved ResultRelInfo */
+ if (saved_resultRelInfo)
+ {
+ resultRelInfo = saved_resultRelInfo;
+ estate->es_result_relation_info = resultRelInfo;
}
+#ifdef PGXC
+ }
+#endif
}
/* Flush any remaining buffered tuples */
matviewname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
RelationGetRelationName(matviewRel));
tempRel = heap_open(tempOid, NoLock);
- tempname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(tempRel)),
+ tempschema = get_namespace_name(RelationGetNamespace(tempRel));
+ tempname = quote_qualified_identifier(tempschema,
RelationGetRelationName(tempRel));
- diffname = make_temptable_name_n(tempname, 2);
+ diffname = make_temptable_name_n(RelationGetRelationName(tempRel), 2);
+ qualified_diffname = quote_qualified_identifier(tempschema, diffname);
relnatts = matviewRel->rd_rel->relnatts;
- usedForQual = (bool *) palloc0(sizeof(bool) * relnatts);
/* Open SPI context. */
if (SPI_connect() != SPI_OK_CONNECT)
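
A hedged note on why the diff-table name is now built in two steps
(identifiers invented for illustration): make_temptable_name_n simply
appends a suffix to whatever string it is given, so applying it to an
already quoted, qualified name such as pg_temp_3."my view" would produce
pg_temp_3."my view"_2, with the suffix stranded outside the quotes.
Suffixing the bare relation name first and qualifying afterwards yields
pg_temp_3."my view_2", a valid identifier.
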
* get information on the (current) result relation
*/
resultRelInfo = estate->es_result_relation_info;
-
- /* Determine the partition to heap_insert the tuple into */
- if (mtstate->mt_partition_dispatch_info)
- {
- int leaf_part_index;
- TupleConversionMap *map;
-
- /*
- * Away we go ... If we end up not finding a partition after all,
- * ExecFindPartition() does not return and errors out instead.
- * Otherwise, the returned value is to be used as an index into arrays
- * mt_partitions[] and mt_partition_tupconv_maps[] that will get us
- * the ResultRelInfo and TupleConversionMap for the partition,
- * respectively.
- */
- leaf_part_index = ExecFindPartition(resultRelInfo,
- mtstate->mt_partition_dispatch_info,
- slot,
- estate);
- Assert(leaf_part_index >= 0 &&
- leaf_part_index < mtstate->mt_num_partitions);
-
- /*
- * Save the old ResultRelInfo and switch to the one corresponding to
- * the selected partition.
- */
- saved_resultRelInfo = resultRelInfo;
- resultRelInfo = mtstate->mt_partitions + leaf_part_index;
-
- /* We do not yet have a way to insert into a foreign partition */
- if (resultRelInfo->ri_FdwRoutine)
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot route inserted tuples to a foreign table")));
-
- /* For ExecInsertIndexTuples() to work on the partition's indexes */
- estate->es_result_relation_info = resultRelInfo;
-
- /*
- * If we're capturing transition tuples, we might need to convert from
- * the partition rowtype to parent rowtype.
- */
- if (mtstate->mt_transition_capture != NULL)
- {
- if (resultRelInfo->ri_TrigDesc &&
- (resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
- resultRelInfo->ri_TrigDesc->trig_insert_instead_row))
- {
- /*
- * If there are any BEFORE or INSTEAD triggers on the
- * partition, we'll have to be ready to convert their result
- * back to tuplestore format.
- */
- mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
- mtstate->mt_transition_capture->tcs_map =
- mtstate->mt_transition_tupconv_maps[leaf_part_index];
- }
- else
- {
- /*
- * Otherwise, just remember the original unconverted tuple, to
- * avoid a needless round trip conversion.
- */
- mtstate->mt_transition_capture->tcs_original_insert_tuple = tuple;
- mtstate->mt_transition_capture->tcs_map = NULL;
- }
- }
- if (mtstate->mt_oc_transition_capture != NULL)
- mtstate->mt_oc_transition_capture->tcs_map =
- mtstate->mt_transition_tupconv_maps[leaf_part_index];
-
- /*
- * We might need to convert from the parent rowtype to the partition
- * rowtype.
- */
- map = mtstate->mt_partition_tupconv_maps[leaf_part_index];
- if (map)
- {
- Relation partrel = resultRelInfo->ri_RelationDesc;
-
- tuple = do_convert_tuple(tuple, map);
-
- /*
- * We must use the partition's tuple descriptor from this point
- * on, until we're finished dealing with the partition. Use the
- * dedicated slot for that.
- */
- slot = mtstate->mt_partition_tuple_slot;
- Assert(slot != NULL);
- ExecSetSlotDescriptor(slot, RelationGetDescr(partrel));
- ExecStoreTuple(tuple, slot, InvalidBuffer, true);
- }
- }
-
resultRelationDesc = resultRelInfo->ri_RelationDesc;
-
/*
* If the result relation has OIDs, force the tuple's OID to zero so that
* heap_insert will assign a fresh OID. Usually the OID already will be
try_parallel_aggregation = true;
}
- else if (parse->hasTargetSRFs)
- {
- try_distributed_aggregation = false;
- }
+ /*
+ * Distributed aggregation, however, works even if there are no partial
+ * paths (when the distribution key is included in the grouping keys, we
+ * may simply push down the whole aggregate).
+ *
+ * XXX We currently don't even try to push down grouping sets, although we
+ * might do that when all grouping sets include the distribution key. But
+ * that seems like a fairly rare case, as in most cases there will be an
+ * empty grouping set () aggregating all the data. So let's look into this
+ * optimization later.
+ */
+ if (!grouped_rel->consider_parallel)
+ {
+ /* Not even parallel-safe. */
+ try_distributed_aggregation = false;
+ }
+ else if (!parse->hasAggs && parse->groupClause == NIL)
+ {
+ /*
+ * We don't know how to do distributed aggregation unless we have
+ * either some aggregates or a grouping clause.
+ */
+ try_distributed_aggregation = false;
+ }
+ else if (parse->groupingSets)
+ {
+ /* We don't know how to do grouping sets in a distributed plan. */
+ try_distributed_aggregation = false;
+ }
+ else if (agg_costs->hasNonPartial || agg_costs->hasNonSerial)
+ {
+ /* Insufficient support for partial mode. */
+ try_distributed_aggregation = false;
+ }
+ else
+ {
+ /* Everything looks good. */
+ try_distributed_aggregation = true;
+ }
+
+ /* Whenever parallel aggregation is allowed, distributed should be too. */
+ Assert(!(try_parallel_aggregation && !try_distributed_aggregation));
+
/*
* Before generating paths for grouped_rel, we first generate any possible
* partial paths; that way, later code can easily consider both parallel
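
To make the push-down condition in the comment above concrete, a hedged
SQL sketch (table and column names invented; assumes a Postgres-XL
cluster with at least two datanodes): when the distribution key is among
the grouping keys, every group lives on exactly one datanode, so the
whole aggregate can be shipped; with any other grouping key, only
partial aggregates run on the datanodes and a combining step remains
above them.

    -- Illustration only, not part of this patch.
    CREATE TABLE t (a int, b int) DISTRIBUTE BY HASH (a);
    -- Distribution key among the grouping keys: push down the whole aggregate.
    SELECT a, count(*) FROM t GROUP BY a;
    -- Any other grouping key: partial aggregation below, combine step above.
    SELECT b, count(*) FROM t GROUP BY b;
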
#include "catalog/pg_constraint_fn.h"
#include "catalog/pg_opclass.h"
#include "catalog/pg_operator.h"
+ #include "catalog/pg_statistic_ext.h"
#include "catalog/pg_type.h"
+#ifdef XCP
+#include "catalog/pgxc_node.h"
+#endif
#include "commands/comment.h"
#include "commands/defrem.h"
#include "commands/sequence.h"
LockClauseStrength strength,
LockWaitPolicy waitPolicy, bool pushedDown);
+#ifdef XCP
+extern void ParseAnalyze_callback(ParseState *pstate, Query *query);
+extern post_parse_analyze_hook_type prev_ParseAnalyze_callback;
+#endif
+ extern List *BuildOnConflictExcludedTargetlist(Relation targetrel,
+ Index exclRelIndex);
+
#endif /* ANALYZE_H */
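
The two XCP declarations above follow PostgreSQL's standard chaining
pattern for post_parse_analyze_hook. A hedged sketch of what the matching
definition conventionally looks like (the body is invented; only the
names and the hook signature come from this header):

    /* Sketch: do XL-specific work, then chain to any previous hook. */
    void
    ParseAnalyze_callback(ParseState *pstate, Query *query)
    {
        /* ... XL-specific post-analysis processing of the Query ... */
        if (prev_ParseAnalyze_callback)
            prev_ParseAnalyze_callback(pstate, query);
    }
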
#define MEMSET_LOOP_LIMIT 1024
/* Define to the address where bug reports for this package should be sent. */
/* Define to the full name of this package. */
 -#define PACKAGE_NAME "PostgreSQL"
 +#define PACKAGE_NAME "Postgres-XL"
/* Define to the full name and version of this package. */
 -#define PACKAGE_STRING "PostgreSQL 10.5"
 +#define PACKAGE_STRING "Postgres-XL 10r1beta1"
/* Define to the version of this package. */
- #define PACKAGE_VERSION "10.3"
+ #define PACKAGE_VERSION "10.5"
/* Define to the name of a signed 128-bit integer type. */
#undef PG_INT128_TYPE
#define PG_INT64_TYPE long long int
/* PostgreSQL version as a string */
- #define PG_VERSION "10.3"
+ #define PG_VERSION "10.5"
/* PostgreSQL version as a number */
- #define PG_VERSION_NUM 100003
+ #define PG_VERSION_NUM 100005
/* Define to the one symbol short name of this package. */
 -#define PACKAGE_TARNAME "postgresql"
 +#define PACKAGE_TARNAME "postgres-xl"
 +
 +/* Postgres-XC version as a string */
 +#define PGXC_VERSION "1.1devel"
 +
 +/* Postgres-XC version as a number */
 +#define PGXC_VERSION_NUM 10100
/* Define to the name of the default PostgreSQL service principal in Kerberos.
(--with-krb-srvnam=NAME) */
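
A hedged aside on the version numbers above: PG_VERSION_NUM appears to use
the two-part encoding introduced with PostgreSQL 10, major * 10000 + minor
(10 * 10000 + 5 = 100005), while PGXC_VERSION_NUM still uses the older
three-part scheme, major * 10000 + minor * 100 + micro (1 * 10000 +
1 * 100 + 0 = 10100 for the "1.1devel" Postgres-XC string carried forward
here).
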
c | integer | | | | plain | |
Partition of: range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE)
Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND ((abs(a) > 6) OR ((abs(a) = 6) AND (abs(b) >= 8))) AND (abs(a) <= 9))
+Distribute By: HASH(a)
+Location Nodes: ALL DATANODES
DROP TABLE range_parted4;
+ -- user-defined operator class in partition key
+ CREATE FUNCTION my_int4_sort(int4,int4) RETURNS int LANGUAGE sql
+ AS $$ SELECT case WHEN $1 = $2 THEN 0 WHEN $1 > $2 THEN 1 ELSE -1 END; $$;
+ CREATE OPERATOR CLASS test_int4_ops FOR TYPE int4 USING btree AS
+ OPERATOR 1 < (int4,int4), OPERATOR 2 <= (int4,int4),
+ OPERATOR 3 = (int4,int4), OPERATOR 4 >= (int4,int4),
+ OPERATOR 5 > (int4,int4), FUNCTION 1 my_int4_sort(int4,int4);
+ CREATE TABLE partkey_t (a int4) PARTITION BY RANGE (a test_int4_ops);
+ CREATE TABLE partkey_t_1 PARTITION OF partkey_t FOR VALUES FROM (0) TO (1000);
+ INSERT INTO partkey_t VALUES (100);
+ INSERT INTO partkey_t VALUES (200);
-- cleanup
DROP TABLE parted, list_parted, range_parted, list_parted2, range_parted2, range_parted3;
+ DROP TABLE partkey_t;
+ DROP OPERATOR CLASS test_int4_ops USING btree;
+ DROP FUNCTION my_int4_sort(int4,int4);
-- comments on partitioned tables columns
CREATE TABLE parted_col_comment (a int, b text) PARTITION BY LIST (a);
COMMENT ON TABLE parted_col_comment IS 'Am partitioned table';
--------+-----------+-----------+----------+---------+----------+--------------+-------------
a | integer[] | | | | extended | |
Partition of: arrlp FOR VALUES IN ('{1}', '{2}')
- Partition constraint: ((a IS NOT NULL) AND (((a)::anyarray OPERATOR(pg_catalog.=) '{1}'::integer[]) OR ((a)::anyarray OPERATOR(pg_catalog.=) '{2}'::integer[])))
+ Partition constraint: ((a IS NOT NULL) AND ((a = '{1}'::integer[]) OR (a = '{2}'::integer[])))
+Distribute By: ROUND ROBIN
+Location Nodes: ALL DATANODES
DROP TABLE arrlp;
+ -- partition on boolean column
+ create table boolspart (a bool) partition by list (a);
+ create table boolspart_t partition of boolspart for values in (true);
+ create table boolspart_f partition of boolspart for values in (false);
+ \d+ boolspart
+ Table "public.boolspart"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+ --------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | boolean | | | | plain | |
+ Partition key: LIST (a)
+ Partitions: boolspart_f FOR VALUES IN (false),
+ boolspart_t FOR VALUES IN (true)
+
+ drop table boolspart;
+ -- partitions mixing temporary and permanent relations
+ create table perm_parted (a int) partition by list (a);
+ create temporary table temp_parted (a int) partition by list (a);
+ create table perm_part partition of temp_parted for values in (1, 2); -- error
+ ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted"
+ create temp table temp_part partition of perm_parted for values in (1, 2); -- error
+ ERROR: cannot create a temporary relation as partition of permanent relation "perm_parted"
+ create temp table temp_part partition of temp_parted for values in (1, 2); -- ok
+ drop table perm_parted cascade;
+ drop table temp_parted cascade;
"ctlt_all_expr_idx" btree ((a || b))
Check constraints:
"ctlt1_a_check" CHECK (length(a) > 2)
+ Statistics objects:
+ "public"."ctlt_all_a_b_stat" (ndistinct, dependencies) ON a, b FROM ctlt_all
+Distribute By: HASH(a)
+Location Nodes: ALL DATANODES
SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 'ctlt_all'::regclass ORDER BY c.relname, objsubid;
relname | objsubid | description
Partition key: LIST (c1)
Check constraints:
"pt2chk1" CHECK (c1 > 0)
+Distribute By: HASH(c1)
+Location Nodes: ALL DATANODES
\d+ pt2_1
- Foreign table "public.pt2_1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | not null | | | extended | |
- c3 | date | | not null | | | plain | |
-Check constraints:
- "p21chk" CHECK (c2 <> ''::text)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR
-ERROR: child table is missing constraint "pt2chk1"
+ERROR: relation "pt2_1" does not exist
ALTER FOREIGN TABLE pt2_1 ADD CONSTRAINT pt2chk1 CHECK (c1 > 0);
+ERROR: relation "pt2_1" does not exist
ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1);
+ERROR: relation "pt2_1" does not exist
-- TRUNCATE doesn't work on foreign tables, either directly or recursively
TRUNCATE pt2_1; -- ERROR
-ERROR: "pt2_1" is not a table
+ERROR: relation "pt2_1" does not exist
TRUNCATE pt2; -- ERROR
-ERROR: "pt2_1" is not a table
DROP FOREIGN TABLE pt2_1;
+ERROR: foreign table "pt2_1" does not exist
DROP TABLE pt2;
+ -- foreign table cannot be part of partition tree made of temporary
+ -- relations.
+ CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a);
+ CREATE FOREIGN TABLE foreign_part PARTITION OF temp_parted
+ FOR VALUES IN (1, 2) SERVER s0; -- ERROR
+ ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted"
+ CREATE FOREIGN TABLE foreign_part (a int) SERVER s0;
+ ALTER TABLE temp_parted ATTACH PARTITION foreign_part
+ FOR VALUES IN (1, 2); -- ERROR
+ ERROR: cannot attach a permanent relation as partition of temporary relation "temp_parted"
+ DROP FOREIGN TABLE foreign_part;
+ DROP TABLE temp_parted;
-- Cleanup
DROP SCHEMA foreign_schema CASCADE;
DROP ROLE regress_test_role; -- ERROR
text search parser | addr_nsp | addr_ts_prs | addr_nsp.addr_ts_prs | t
text search configuration | addr_nsp | addr_ts_conf | addr_nsp.addr_ts_conf | t
text search template | addr_nsp | addr_ts_temp | addr_nsp.addr_ts_temp | t
- (37 rows)
+ subscription | | addr_sub | addr_sub | t
+ publication | | addr_pub | addr_pub | t
+ publication relation | | | addr_nsp.gentable in publication addr_pub | t
 -(46 rows)
++(40 rows)
---
--- Cleanup resources
DROP column name;
UPDATE alter_table_under_transition_tables
SET id = id;
-ERROR: column "name" does not exist
-LINE 1: SELECT (SELECT string_agg(id || '=' || name, ',') FROM d)
- ^
-QUERY: SELECT (SELECT string_agg(id || '=' || name, ',') FROM d)
-CONTEXT: PL/pgSQL function alter_table_under_transition_tables_upd_func() line 3 at RAISE
--
+ -- Test multiple reference to a transition table
+ --
+ CREATE TABLE multi_test (i int);
+ INSERT INTO multi_test VALUES (1);
+ CREATE OR REPLACE FUNCTION multi_test_trig() RETURNS trigger
+ LANGUAGE plpgsql AS $$
+ BEGIN
+ RAISE NOTICE 'count = %', (SELECT COUNT(*) FROM new_test);
+ RAISE NOTICE 'count union = %',
+ (SELECT COUNT(*)
+ FROM (SELECT * FROM new_test UNION ALL SELECT * FROM new_test) ss);
+ RETURN NULL;
+ END$$;
+ CREATE TRIGGER my_trigger AFTER UPDATE ON multi_test
+ REFERENCING NEW TABLE AS new_test OLD TABLE as old_test
+ FOR EACH STATEMENT EXECUTE PROCEDURE multi_test_trig();
+ UPDATE multi_test SET i = i;
+ NOTICE: count = 1
+ NOTICE: count union = 2
+ DROP TABLE multi_test;
+ DROP FUNCTION multi_test_trig();
+ --
-- Check type parsing and record fetching from partitioned tables
--
CREATE TABLE partitioned_table (a int, b text) PARTITION BY LIST (a);
ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag) VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text)
r2 AS
ON UPDATE TO rules_src DO VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text)
- r3 AS
- ON DELETE TO rules_src DO
- NOTIFY rules_src_deletion
+Distribute By: HASH(f1)
+Location Nodes: ALL DATANODES
--
- -- Ensure a aliased target relation for insert is correctly deparsed.
+ -- Ensure an aliased target relation for insert is correctly deparsed.
--
create rule r4 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2;
create rule r5 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1;
(24 rows)
reset enable_hashagg;
+ -- case with degenerate ORDER BY
+ explain (verbose, costs off)
+ select 'foo' as f, generate_series(1,2) as g from few order by 1;
+ QUERY PLAN
+ ----------------------------------------------
+ ProjectSet
+ Output: 'foo'::text, generate_series(1, 2)
+ -> Seq Scan on public.few
+ Output: id, dataa, datab
+ (4 rows)
+
+ select 'foo' as f, generate_series(1,2) as g from few order by 1;
+ f | g
+ -----+---
+ foo | 1
+ foo | 2
+ foo | 1
+ foo | 2
+ foo | 1
+ foo | 2
+ (6 rows)
+
-- data modification
CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data;
-INSERT INTO fewmore VALUES(generate_series(4,5));
-SELECT * FROM fewmore;
+INSERT INTO fewmore SELECT generate_series(4,5);
+SELECT * FROM fewmore ORDER BY 1;
data
------
1
"xmltable".premier_name
FROM ( SELECT xmldata.data
FROM xmldata) x,
- LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+ LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1;
- QUERY PLAN
------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------
Nested Loop
- -> Seq Scan on xmldata
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on xmldata
-> Table Function Scan on "xmltable"
-(3 rows)
+(4 rows)
EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1;
- QUERY PLAN
- -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Nested Loop
Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
- -> Seq Scan on public.xmldata
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
Output: xmldata.data
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
-> Table Function Scan on "xmltable"
Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
- Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
- (9 rows)
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+ (7 rows)
-- XMLNAMESPACES tests
SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
"xmltable".premier_name
FROM ( SELECT xmldata.data
FROM xmldata) x,
- LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+ LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1;
- QUERY PLAN
------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------
Nested Loop
- -> Seq Scan on xmldata
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Seq Scan on xmldata
-> Table Function Scan on "xmltable"
-(3 rows)
+(4 rows)
EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1;
- QUERY PLAN
- -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Nested Loop
Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
- -> Seq Scan on public.xmldata
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
Output: xmldata.data
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
-> Table Function Scan on "xmltable"
Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
- Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
- (9 rows)
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+ (7 rows)
-- XMLNAMESPACES tests
SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY g;
reset enable_hashagg;
+ -- case with degenerate ORDER BY
+ explain (verbose, costs off)
+ select 'foo' as f, generate_series(1,2) as g from few order by 1;
+ select 'foo' as f, generate_series(1,2) as g from few order by 1;
+
-- data modification
CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data;
-INSERT INTO fewmore VALUES(generate_series(4,5));
-SELECT * FROM fewmore;
+INSERT INTO fewmore SELECT generate_series(4,5);
+SELECT * FROM fewmore ORDER BY 1;
-- SRFs are not allowed in UPDATE (they once were, but it was nonsense)
UPDATE fewmore SET data = generate_series(4,9);