for values in (1, 2); -- ok
drop table perm_part_parent cascade;
drop table temp_part_parent cascade;
-
-- check that attaching partitions to a table while it is being used is
-- prevented
create table tab_part_attach (a int) partition by list (a);
end $$;
create trigger trig_part_attach before insert on tab_part_attach
for each statement execute procedure func_part_attach();
+ERROR: Postgres-XL does not support TRIGGER yet
+DETAIL: The feature is not currently supported
insert into tab_part_attach values (1);
-ERROR: cannot ALTER TABLE "tab_part_attach" because it is being used by active queries in this session
-CONTEXT: SQL statement "alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)"
-PL/pgSQL function func_part_attach() line 4 at EXECUTE
+ERROR: no partition of relation "tab_part_attach" found for row
+DETAIL: Partition key of the failing row contains (a) = (1).
drop table tab_part_attach;
drop function func_part_attach();
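-- Note: the body of func_part_attach is elided by the diff context above.
-- A minimal sketch of such a trigger function, reconstructed from the
-- removed CONTEXT lines (the exact original body is an assumption); in
-- stock PostgreSQL the inner ATTACH PARTITION fails because the table is
-- in use, while in Postgres-XL the trigger is never created at all:
create function func_part_attach() returns trigger
  language plpgsql as $$
  begin
    -- assumed partition definition; only the ATTACH statement text is
    -- confirmed by the CONTEXT lines in the removed output above
    execute 'create table tab_part_attach_1 (a int)';
    execute 'alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)';
    return null;
  end $$;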
-- test case where the partitioning operator is a SQL function whose
drop table at_test_sql_partop;
drop operator class at_test_sql_partop using btree;
drop function at_test_sql_partop;
-
-- xl tests
create table xl_parent (a int, b int, c text) partition by list (a);
-- create a partition
end $$;
create trigger trig_part_create before insert on tab_part_create
for each statement execute procedure func_part_create();
+ERROR: Postgres-XL does not support TRIGGER yet
+DETAIL: The feature is not currently supported
insert into tab_part_create values (1);
-ERROR: cannot CREATE TABLE .. PARTITION OF "tab_part_create" because it is being used by active queries in this session
-CONTEXT: SQL statement "create table tab_part_create_1 partition of tab_part_create for values in (1)"
-PL/pgSQL function func_part_create() line 3 at EXECUTE
+ERROR: no partition of relation "tab_part_create" found for row
+DETAIL: Partition key of the failing row contains (a) = (1).
drop table tab_part_create;
drop function func_part_create();
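-- Likewise, a sketch (assumed, not the elided original) of the trigger
-- function behind the tab_part_create test; the EXECUTE'd statement is
-- taken verbatim from the removed CONTEXT lines:
create function func_part_create() returns trigger
  language plpgsql as $$
  begin
    execute 'create table tab_part_create_1 partition of tab_part_create for values in (1)';
    return null;
  end $$;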
-- xl tests
DETAIL: drop cascades to table evttrig.one
drop cascades to table evttrig.two
drop cascades to table evttrig.parted
-NOTICE: NORMAL: orig=t normal=f istemp=f type=schema identity=evttrig name={evttrig} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.one name={evttrig,one} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=sequence identity=evttrig.one_col_a_seq name={evttrig,one_col_a_seq} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_a name={evttrig,one,col_a} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.two name={evttrig,two} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.parted name={evttrig,parted} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_1_10 name={evttrig,part_1_10} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_20 name={evttrig,part_10_20} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_15 name={evttrig,part_10_15} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_15_20 name={evttrig,part_15_20} args={}
DROP TABLE a_temp_tbl;
DROP EVENT TRIGGER regress_event_trigger_report_dropped;
ERROR: event trigger "regress_event_trigger_report_dropped" does not exist
SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA';
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
+-- Check behavior with rewinding to a previous child scan node,
+-- as per bug #15395
+BEGIN;
+CREATE TABLE current_check (currentid int, payload text);
+CREATE TABLE current_check_1 () INHERITS (current_check);
+CREATE TABLE current_check_2 () INHERITS (current_check);
+INSERT INTO current_check_1 SELECT i, 'p' || i FROM generate_series(1,9) i;
+INSERT INTO current_check_2 SELECT i, 'P' || i FROM generate_series(10,19) i;
+DECLARE c1 SCROLL CURSOR FOR SELECT * FROM current_check ORDER BY currentid;
+-- This tests the fetch-backwards code path
+FETCH ABSOLUTE 12 FROM c1;
+ currentid | payload
+-----------+---------
+ 12 | P12
+(1 row)
+
+FETCH ABSOLUTE 8 FROM c1;
+ currentid | payload
+-----------+---------
+ 8 | p8
+(1 row)
+
+DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *;
+ERROR: WHERE CURRENT OF clause not yet supported
+-- This tests the ExecutorRewind code path
+FETCH ABSOLUTE 13 FROM c1;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+FETCH ABSOLUTE 1 FROM c1;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+SELECT * FROM current_check;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+ROLLBACK;
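-- Note: Postgres-XL rejects the first DELETE ... WHERE CURRENT OF, which
-- aborts the transaction; every later statement up to the ROLLBACK then
-- reports "current transaction is aborted", so the ExecutorRewind code
-- path is never actually reached here.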
-- Make sure snapshot management works okay, per bug report in
BEGIN;
explain (costs off, verbose)
select count(*) from tenk1 a where (unique1, two) in
(select unique1, row_number() over() from tenk1 b);
- QUERY PLAN
-----------------------------------------------------------------------------------------------
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------
Aggregate
Output: count(*)
-> Hash Semi Join
Hash Cond: ((a.unique1 = b.unique1) AND (a.two = (row_number() OVER (?))))
- -> Gather
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
Output: a.unique1, a.two
- Workers Planned: 4
- -> Parallel Seq Scan on public.tenk1 a
+ -> Gather
Output: a.unique1, a.two
+ Workers Planned: 4
+ -> Parallel Seq Scan on public.tenk1 a
+ Output: a.unique1, a.two
-> Hash
Output: b.unique1, (row_number() OVER (?))
-> WindowAgg
Output: b.unique1, row_number() OVER (?)
- -> Gather
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
Output: b.unique1
- Workers Planned: 4
- -> Parallel Index Only Scan using tenk1_unique1 on public.tenk1 b
+ -> Gather
Output: b.unique1
-(18 rows)
+ Workers Planned: 4
+ -> Parallel Index Only Scan using tenk1_unique1 on public.tenk1 b
+ Output: b.unique1
+(22 rows)
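-- The plan content is unchanged; Postgres-XL merely wraps each Gather in a
-- Remote Subquery Scan so the parallel scans execute on the datanodes,
-- which grows the plan from 18 to 22 rows.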
-- LIMIT/OFFSET within sub-selects can't be pushed to workers.
explain (costs off)
select * from tenk1 a where two in
(select two from tenk1 b where stringu1 like '%AAAA' limit 3);
- QUERY PLAN
----------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------
Hash Semi Join
Hash Cond: (a.two = b.two)
- -> Gather
- Workers Planned: 4
- -> Parallel Seq Scan on tenk1 a
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Gather
+ Workers Planned: 4
+ -> Parallel Seq Scan on tenk1 a
-> Hash
-> Limit
- -> Gather
- Workers Planned: 4
- -> Parallel Seq Scan on tenk1 b
- Filter: (stringu1 ~~ '%AAAA'::text)
-(11 rows)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Limit
+ -> Gather
+ Workers Planned: 4
+ -> Parallel Seq Scan on tenk1 b
+ Filter: (stringu1 ~~ '%AAAA'::text)
+(14 rows)
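-- Note the Limit now appears twice: once below the Remote Subquery Scan,
-- applied locally on each datanode, and once above it, so the coordinator
-- returns at most three rows from the combined datanode streams.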
-- to increase the parallel query test coverage
EXPLAIN (timing off, summary off, costs off) SELECT * FROM tenk1;
INSERT INTO fooarr VALUES('1', ARRAY[1,2], 'one');
PREPARE pstmt(text, int[]) AS SELECT * FROM fooarr WHERE f1 = $1 AND f2 = $2;
EXPLAIN (COSTS OFF) EXECUTE pstmt('1', make_some_array(1,2));
- QUERY PLAN
-------------------------------------------------------------------
- Gather
- Workers Planned: 3
- -> Parallel Seq Scan on fooarr
- Filter: ((f1 = '1'::text) AND (f2 = '{1,2}'::integer[]))
-(4 rows)
+ QUERY PLAN
+----------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ ->
+(3 rows)
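-- Under Remote Fast Query Execution the statement is shipped whole to the
-- datanodes, so the coordinator shows no local parallel plan (no Gather or
-- Parallel Seq Scan) for this query.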
EXECUTE pstmt('1', make_some_array(1,2));
f1 | f2 | f3
INSERT INTO current_check_1 SELECT i, 'p' || i FROM generate_series(1,9) i;
INSERT INTO current_check_2 SELECT i, 'P' || i FROM generate_series(10,19) i;
-DECLARE c1 SCROLL CURSOR FOR SELECT * FROM current_check;
+DECLARE c1 SCROLL CURSOR FOR SELECT * FROM current_check ORDER BY currentid;
-- This tests the fetch-backwards code path
FETCH ABSOLUTE 12 FROM c1;
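-- The ORDER BY added to the cursor above keeps FETCH ABSOLUTE deterministic
-- in Postgres-XL, where rows otherwise arrive from the datanodes in an
-- unspecified order.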