diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index de5b6992b6d..81c1a3340fb 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -2340,6 +2340,7 @@ get_prestate_rte(RangeTblEntry *rte, MV_TriggerTable *table,
rte->relkind = 0;
rte->rellockmode = 0;
rte->tablesample = NULL;
+ rte->perminfoindex = 0; /* subquery RTEs need no permission checks */
rte->inh = false; /* must not be set for a subquery */
return rte;
@@ -2403,6 +2404,7 @@ replace_rte_with_delta(RangeTblEntry *rte, MV_TriggerTable *table, bool is_new,
rte->relkind = 0;
rte->rellockmode = 0;
rte->tablesample = NULL;
+ rte->perminfoindex = 0; /* subquery RTEs need no permission checks */
rte->inh = false; /* must not be set for a subquery */
return rte;
diff --git a/src/backend/executor/nodeSplitMerge.c b/src/backend/executor/nodeSplitMerge.c
index 2b0bc158b55..09b5a0d4b97 100644
--- a/src/backend/executor/nodeSplitMerge.c
+++ b/src/backend/executor/nodeSplitMerge.c
@@ -146,7 +146,10 @@ MergeTupleTableSlot(TupleTableSlot *slot, SplitMerge *plannode, SplitMergeState
/* Compute segment ID for the new row */
int32 target_seg;
- target_seg = evalHashKey(node, newslot->tts_values, newslot->tts_isnull);
+ if (node->cdbhash)
+ target_seg = evalHashKey(node, newslot->tts_values, newslot->tts_isnull);
+ else
+ target_seg = cdbhashrandomseg(plannode->numHashSegments);
slot->tts_values[node->segid_attno - 1] = Int32GetDatum(target_seg);
slot->tts_isnull[node->segid_attno - 1] = false;
diff --git a/src/backend/gpopt/gpdbwrappers.cpp b/src/backend/gpopt/gpdbwrappers.cpp
index 947cd3f985a..1d15375b417 100644
--- a/src/backend/gpopt/gpdbwrappers.cpp
+++ b/src/backend/gpopt/gpdbwrappers.cpp
@@ -34,6 +34,7 @@
#include "catalog/pg_collation.h"
extern "C" {
#include "access/amapi.h"
+#include "commands/defrem.h"
#include "access/external.h"
#include "access/genam.h"
#include "catalog/pg_inherits.h"
@@ -1960,8 +1961,8 @@ gpdb::GetMVNDistinct(Oid stat_oid)
{
GP_WRAP_START;
{
- /* CBDB_16_MERGE: xxx: do we need ihn = true in any case? */
- return statext_ndistinct_load(stat_oid, false);
+ bool inh = has_subclass(StatisticsGetRelation(stat_oid, false));
+ return statext_ndistinct_load(stat_oid, inh);
}
GP_WRAP_END;
}
@@ -1971,7 +1972,8 @@ gpdb::GetMVDependencies(Oid stat_oid)
{
GP_WRAP_START;
{
- return statext_dependencies_load(stat_oid, false, true);
+ bool inh = has_subclass(StatisticsGetRelation(stat_oid, false));
+ return statext_dependencies_load(stat_oid, inh, true);
}
GP_WRAP_END;
}
@@ -2818,11 +2820,12 @@ gpdb::TestexprIsHashable(Node *testexpr, List *param_ids)
}
RTEPermissionInfo *
-gpdb::GetRTEPermissionInfo(List *rteperminfos,
- const RangeTblEntry *rte)
+gpdb::GetRTEPermissionInfo(List *rteperminfos, const RangeTblEntry *rte)
{
GP_WRAP_START;
{
+ // Cast away const: upstream getRTEPermissionInfo() only reads
+ // rte->perminfoindex and rte->relid but its signature lacks const.
return getRTEPermissionInfo(rteperminfos, (RangeTblEntry *) rte);
}
GP_WRAP_END;
diff --git a/src/backend/gpopt/translate/CContextDXLToPlStmt.cpp b/src/backend/gpopt/translate/CContextDXLToPlStmt.cpp
index 16eec1ba5a6..b31508be45c 100644
--- a/src/backend/gpopt/translate/CContextDXLToPlStmt.cpp
+++ b/src/backend/gpopt/translate/CContextDXLToPlStmt.cpp
@@ -594,16 +594,16 @@ CContextDXLToPlStmt::GetRTEIndexByAssignedQueryId(
//---------------------------------------------------------------------------
// @function:
-// CContextDXLToPlStmt::AddPerfmInfo
+// CContextDXLToPlStmt::AddPermInfo
//
// @doc:
-// Add a Perfission Info list entry
+// Add a Permission Info list entry
//
//---------------------------------------------------------------------------
void
-CContextDXLToPlStmt::AddPerfmInfo(RTEPermissionInfo *pi)
+CContextDXLToPlStmt::AddPermInfo(RTEPermissionInfo *pi)
{
- // add rte to rtable entries list
+ // append the RTEPermissionInfo entry to the perminfo list
m_perminfo_list = gpdb::LAppend(m_perminfo_list, pi);
}
diff --git a/src/backend/gpopt/translate/CTranslatorDXLToPlStmt.cpp b/src/backend/gpopt/translate/CTranslatorDXLToPlStmt.cpp
index f634786e982..a0b2cb04d8b 100644
--- a/src/backend/gpopt/translate/CTranslatorDXLToPlStmt.cpp
+++ b/src/backend/gpopt/translate/CTranslatorDXLToPlStmt.cpp
@@ -659,9 +659,24 @@ CTranslatorDXLToPlStmt::TranslateDXLTblScan(
// The postgres_fdw wrapper does not support row level security. So
// passing only the query_quals while creating the foreign scan node.
+ //
+ // BuildForeignScan internally calls build_simple_rel which looks up
+ // RTEPermissionInfo via root->parse->rteperminfos. The RTE here was
+ // newly created by ORCA with its own perminfoindex numbering, which
+ // may not match m_orig_query->rteperminfos (e.g. after the rewriter
+ // expands external-table ON SELECT rules into subqueries the outer
+ // query's rteperminfos shrinks). Temporarily swap in ORCA's own
+ // perminfos list so the indices are consistent.
+ Query *orig_query = m_dxl_to_plstmt_context->m_orig_query;
+ List *saved_perminfos = orig_query->rteperminfos;
+ orig_query->rteperminfos =
+ m_dxl_to_plstmt_context->GetPermInfosList();
+
ForeignScan *foreign_scan =
gpdb::CreateForeignScan(oidRel, index, query_quals, targetlist,
- m_dxl_to_plstmt_context->m_orig_query, rte);
+ orig_query, rte);
+
+ orig_query->rteperminfos = saved_perminfos;
foreign_scan->scan.scanrelid = index;
plan = &(foreign_scan->scan.plan);
plan_return = (Plan *) foreign_scan;
@@ -4611,9 +4626,15 @@ CTranslatorDXLToPlStmt::TranslateDXLDynForeignScan(
RelationGetDescr(childRel),
index, qual, targetlist);
+ // Same perminfos swap as in the non-dynamic foreign scan path above.
+ Query *orig_query = m_dxl_to_plstmt_context->m_orig_query;
+ List *saved_perminfos = orig_query->rteperminfos;
+ orig_query->rteperminfos =
+ m_dxl_to_plstmt_context->GetPermInfosList();
+
ForeignScan *foreign_scan_first_part =
gpdb::CreateForeignScan(oid_first_child, index, qual, targetlist,
- m_dxl_to_plstmt_context->m_orig_query, rte);
+ orig_query, rte);
// Set the plan fields to the first partition. We still want the plan type to be
// a dynamic foreign scan
@@ -4645,11 +4666,14 @@ CTranslatorDXLToPlStmt::TranslateDXLDynForeignScan(
ForeignScan *foreign_scan =
gpdb::CreateForeignScan(rte->relid, index, qual, targetlist,
- m_dxl_to_plstmt_context->m_orig_query, rte);
+ orig_query, rte);
dyn_foreign_scan->fdw_private_list = gpdb::LAppend(
dyn_foreign_scan->fdw_private_list, foreign_scan->fdw_private);
}
+
+ orig_query->rteperminfos = saved_perminfos;
+
// convert qual and targetlist back to root relation. This is used by the
// executor node to remap to the children
gpdb::RelationWrapper prevRel = gpdb::GetRelation(rte->relid);
@@ -5336,7 +5360,7 @@ CTranslatorDXLToPlStmt::ProcessDXLTblDescr(
rte->eref = alias;
rte->alias = alias;
- m_dxl_to_plstmt_context->AddPerfmInfo(pi);
+ m_dxl_to_plstmt_context->AddPermInfo(pi);
// set up rte <> perm info link.
rte->perminfoindex = gpdb::ListLength(
diff --git a/src/backend/optimizer/plan/orca.c b/src/backend/optimizer/plan/orca.c
index ffebbc8d8d7..255db185c5e 100644
--- a/src/backend/optimizer/plan/orca.c
+++ b/src/backend/optimizer/plan/orca.c
@@ -518,7 +518,6 @@ transformGroupedWindows(Node *node, void *context)
Query *subq;
RangeTblEntry *rte;
- RTEPermissionInfo *perminfo;
RangeTblRef *ref;
Alias *alias;
bool hadSubLinks = qry->hasSubLinks;
@@ -545,6 +544,7 @@ transformGroupedWindows(Node *node, void *context)
/* Core of subquery input table expression: */
subq->rtable = qry->rtable; /* before windowing */
+ subq->rteperminfos = qry->rteperminfos; /* before windowing */
subq->jointree = qry->jointree; /* before windowing */
subq->targetList = NIL; /* fill in later */
@@ -578,11 +578,9 @@ transformGroupedWindows(Node *node, void *context)
rte->eref = NULL; /* fill in later */
rte->inFromCl = true;
- perminfo = makeNode(RTEPermissionInfo);
- perminfo->requiredPerms = ACL_SELECT;
-
/*
- * Default? rte->inh = 0; rte->checkAsUser = 0;
+ * Subquery RTEs do not need RTEPermissionInfo. Permission checks
+ * are performed on the base tables within the subquery itself.
*/
/*
@@ -605,7 +603,7 @@ transformGroupedWindows(Node *node, void *context)
/* Core of outer query input table expression: */
qry->rtable = list_make1(rte);
- qry->rteperminfos = list_make1(perminfo);
+ qry->rteperminfos = NIL;
qry->jointree = (FromExpr *) makeNode(FromExpr);
qry->jointree->fromlist = list_make1(ref);
qry->jointree->quals = NULL;
diff --git a/src/include/gpopt/translate/CContextDXLToPlStmt.h b/src/include/gpopt/translate/CContextDXLToPlStmt.h
index 1b88fb56711..8ff32e9b0f5 100644
--- a/src/include/gpopt/translate/CContextDXLToPlStmt.h
+++ b/src/include/gpopt/translate/CContextDXLToPlStmt.h
@@ -109,7 +109,7 @@ class CContextDXLToPlStmt
// list of all rtable entries
List *m_rtable_entries_list;
- // list of all rtable entries
+ // list of all RTEPermissionInfo entries
List *m_perminfo_list;
// list of all subplan entries
@@ -249,8 +249,8 @@ class CContextDXLToPlStmt
Index GetRTEIndexByAssignedQueryId(ULONG assigned_query_id_for_target_rel,
BOOL *is_rte_exists);
- // add a perm info.
- void AddPerfmInfo(RTEPermissionInfo *pi);
+ // add a permission info entry
+ void AddPermInfo(RTEPermissionInfo *pi);
// get perm info from m_perminfo_list by given index
RTEPermissionInfo *GetPermInfoByIndex(Index index);
diff --git a/src/test/regress/expected/aggregates_optimizer.out b/src/test/regress/expected/aggregates_optimizer.out
index 86dd330de84..ab58f0cb43b 100644
--- a/src/test/regress/expected/aggregates_optimizer.out
+++ b/src/test/regress/expected/aggregates_optimizer.out
@@ -4,8 +4,18 @@
-- start_ignore
SET optimizer_trace_fallback to on;
-- end_ignore
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
-- avoid bit-exact output here because operations may not be bit-exact.
SET extra_float_digits = 0;
+-- prepare some test data
+CREATE TABLE aggtest (
+ a int2,
+ b float4
+);
+\set filename :abs_srcdir '/data/agg.data'
+COPY aggtest FROM :'filename';
+ANALYZE aggtest;
SELECT avg(four) AS avg_1 FROM onek;
avg_1
--------------------
@@ -18,6 +28,30 @@ SELECT avg(a) AS avg_32 FROM aggtest WHERE a < 100;
32.6666666666666667
(1 row)
+SELECT any_value(v) FROM (VALUES (1), (2), (3)) AS v (v);
+ any_value
+-----------
+ 1
+(1 row)
+
+SELECT any_value(v) FROM (VALUES (NULL)) AS v (v);
+ any_value
+-----------
+
+(1 row)
+
+SELECT any_value(v) FROM (VALUES (NULL), (1), (2)) AS v (v);
+ any_value
+-----------
+ 1
+(1 row)
+
+SELECT any_value(v) FROM (VALUES (array['hello', 'world'])) AS v (v);
+ any_value
+---------------
+ {hello,world}
+(1 row)
+
-- In 7.1, avg(float4) is computed using float8 arithmetic.
-- Round the result to 3 digits to avoid platform-specific results.
SELECT avg(b)::numeric(10,3) AS avg_107_943 FROM aggtest;
@@ -1305,6 +1339,40 @@ NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table minmaxtest1
drop cascades to table minmaxtest2
drop cascades to table minmaxtest3
+-- DISTINCT can also trigger wrong answers with hash aggregation (bug #18465)
+begin;
+set local enable_sort = off;
+explain (costs off)
+ select f1, (select distinct min(t1.f1) from int4_tbl t1 where t1.f1 = t0.f1)
+ from int4_tbl t0;
+ QUERY PLAN
+-----------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on int4_tbl t0
+ SubPlan 1
+ -> GroupAggregate
+ Group Key: min(t1.f1)
+ -> Aggregate
+ -> Result
+ Filter: (t1.f1 = t0.f1)
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int4_tbl t1
+ Optimizer: GPORCA
+(12 rows)
+
+select f1, (select distinct min(t1.f1) from int4_tbl t1 where t1.f1 = t0.f1)
+from int4_tbl t0;
+ f1 | min
+-------------+-------------
+ 0 | 0
+ 123456 | 123456
+ -123456 | -123456
+ 2147483647 | 2147483647
+ -2147483647 | -2147483647
+(5 rows)
+
+rollback;
-- check for correct detection of nested-aggregate errors
select max(min(unique1)) from tenk1;
ERROR: aggregate function calls cannot be nested
@@ -1452,8 +1520,8 @@ drop table p_t1;
--
-- Test GROUP BY matching of join columns that are type-coerced due to USING
--
-create temp table t1(f1 int, f2 bigint);
-create temp table t2(f1 bigint, f22 bigint);
+create temp table t1(f1 int, f2 int);
+create temp table t2(f1 bigint, f2 oid);
select f1 from t1 left join t2 using (f1) group by f1;
f1
----
@@ -1474,7 +1542,198 @@ select t1.f1 from t1 left join t2 using (f1) group by f1;
ERROR: column "t1.f1" must appear in the GROUP BY clause or be used in an aggregate function
LINE 1: select t1.f1 from t1 left join t2 using (f1) group by f1;
^
+-- check case where we have to inject nullingrels into coerced join alias
+select f1, count(*) from
+t1 x(x0,x1) left join (t1 left join t2 using(f1)) on (x0 = 0)
+group by f1;
+ f1 | count
+----+-------
+(0 rows)
+
+-- same, for a RelabelType coercion
+select f2, count(*) from
+t1 x(x0,x1) left join (t1 left join t2 using(f2)) on (x0 = 0)
+group by f2;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: CTranslatorQueryToDXL.cpp:4130: Failed assertion: ((((const Node*)(join_alias_node))->type) == T_Var) || ((((const Node*)(join_alias_node))->type) == T_FuncExpr) || ((((const Node*)(join_alias_node))->type) == T_CoalesceExpr)
+ f2 | count
+----+-------
+(0 rows)
+
drop table t1, t2;
+--
+-- Test planner's selection of pathkeys for ORDER BY aggregates
+--
+-- Ensure we order by four. This suits the most aggregate functions.
+explain (costs off)
+select sum(two order by two),max(four order by four), min(four order by four)
+from tenk1;
+QUERY PLAN
+___________
+ Finalize Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Partial Aggregate
+ -> Seq Scan on tenk1
+ Optimizer: Pivotal Optimizer (GPORCA)
+
+-- Ensure we order by two. It's a tie between ordering by two and four but
+-- we tiebreak on the aggregate's position.
+explain (costs off)
+select
+ sum(two order by two), max(four order by four),
+ min(four order by four), max(two order by two)
+from tenk1;
+QUERY PLAN
+___________
+ Finalize Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Partial Aggregate
+ -> Seq Scan on tenk1
+ Optimizer: Pivotal Optimizer (GPORCA)
+
+-- Similar to above, but tiebreak on ordering by four
+explain (costs off)
+select
+ max(four order by four), sum(two order by two),
+ min(four order by four), max(two order by two)
+from tenk1;
+QUERY PLAN
+___________
+ Finalize Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Partial Aggregate
+ -> Seq Scan on tenk1
+ Optimizer: Pivotal Optimizer (GPORCA)
+
+-- Ensure this one orders by ten since there are 3 aggregates that require ten
+-- vs two that suit two and four.
+explain (costs off)
+select
+ max(four order by four), sum(two order by two),
+ min(four order by four), max(two order by two),
+ sum(ten order by ten), min(ten order by ten), max(ten order by ten)
+from tenk1;
+QUERY PLAN
+___________
+ Finalize Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Partial Aggregate
+ -> Seq Scan on tenk1
+ Optimizer: Pivotal Optimizer (GPORCA)
+
+-- Try a case involving a GROUP BY clause where the GROUP BY column is also
+-- part of an aggregate's ORDER BY clause. We want a sort order that works
+-- for the GROUP BY along with the first and the last aggregate.
+explain (costs off)
+select
+ sum(unique1 order by ten, two), sum(unique1 order by four),
+ sum(unique1 order by two, four)
+from tenk1
+group by ten;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Hash aggregation with ORDER BY
+QUERY PLAN
+___________
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> GroupAggregate
+ Group Key: ten
+ -> Sort
+ Sort Key: ten, two, four
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: ten
+ -> Seq Scan on tenk1
+ Optimizer: Pivotal Optimizer (GPORCA)
+
+-- Ensure that we never choose to provide presorted input to an Aggref with
+-- a volatile function in the ORDER BY / DISTINCT clause. We want to ensure
+-- these sorts are performed individually rather than at the query level.
+explain (costs off)
+select
+ sum(unique1 order by two), sum(unique1 order by four),
+ sum(unique1 order by four, two), sum(unique1 order by two, random()),
+ sum(unique1 order by two, random(), random() + 1)
+from tenk1
+group by ten;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Hash aggregation with ORDER BY
+ QUERY PLAN
+------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> GroupAggregate
+ Group Key: ten
+ -> Sort
+ Sort Key: ten, four, two
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: ten
+ -> Seq Scan on tenk1
+ Optimizer: Postgres query optimizer
+(9 rows)
+
+-- Ensure consecutive NULLs are properly treated as distinct from each other
+select array_agg(distinct val)
+from (select null as val from generate_series(1, 2));
+ array_agg
+-----------
+ {NULL}
+(1 row)
+
+-- Ensure no ordering is requested when enable_presorted_aggregate is off
+set enable_presorted_aggregate to off;
+explain (costs off)
+select sum(two order by two) from tenk1;
+QUERY PLAN
+___________
+ Finalize Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Partial Aggregate
+ -> Seq Scan on tenk1
+ Optimizer: Pivotal Optimizer (GPORCA)
+
+reset enable_presorted_aggregate;
+--
+-- Test cases with FILTER clause
+--
+-- Ensure we presort when the aggregate contains plain Vars
+explain (costs off)
+select sum(two order by two) filter (where two > 1) from tenk1;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Aggregate functions with FILTER
+QUERY PLAN
+___________
+ Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on tenk1
+ Optimizer: Pivotal Optimizer (GPORCA)
+
+-- Ensure we presort for RelabelType'd Vars
+explain (costs off)
+select string_agg(distinct f1, ',') filter (where length(f1) > 1)
+from varchar_tbl;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Aggregate functions with FILTER
+QUERY PLAN
+___________
+ Finalize Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Partial Aggregate
+ -> Seq Scan on varchar_tbl
+ Optimizer: Pivotal Optimizer (GPORCA)
+
+-- Ensure we don't presort when the aggregate's argument contains an
+-- explicit cast.
+explain (costs off)
+select string_agg(distinct f1::varchar(2), ',') filter (where length(f1) > 1)
+from varchar_tbl;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Aggregate functions with FILTER
+ QUERY PLAN
+------------------------------------------------
+ Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on varchar_tbl
+ Optimizer: Postgres query optimizer
+(4 rows)
+
--
-- Test combinations of DISTINCT and/or ORDER BY
--
@@ -1638,12 +1897,24 @@ select aggfns(distinct a,b,c order by a,c using ~<~,b)
generate_series(1,2) i;
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: ROW EXPRESSION
-CONTEXT: SQL function "aggfns_trans" during startup
aggfns
-----------------------------------------------
{"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
+-- test a more complex permutation that has previous caused issues
+select
+ string_agg(distinct 'a', ','),
+ sum((
+ select sum(1)
+ from (values(1)) b(id)
+ where a.id = b.id
+)) from unnest(array[1]) a(id);
+ string_agg | sum
+------------+-----
+ a | 1
+(1 row)
+
-- check node I/O via view creation and usage, also deparsing logic
-- start_ignore
-- pg_get_viewdef() runs some internal queries on catalogs, and we don't want
@@ -1662,7 +1933,7 @@ select * from agg_view1;
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
- SELECT aggfns(v.a, v.b, v.c) AS aggfns +
+ SELECT aggfns(a, b, c) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
@@ -1714,7 +1985,7 @@ select * from agg_view1;
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
- SELECT aggfns(v.a, v.b, v.c ORDER BY (v.b + 1)) AS aggfns +
+ SELECT aggfns(a, b, c ORDER BY (b + 1)) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
@@ -1730,7 +2001,7 @@ select * from agg_view1;
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
- SELECT aggfns(v.a, v.a, v.c ORDER BY v.b) AS aggfns +
+ SELECT aggfns(a, a, c ORDER BY b) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
@@ -1746,7 +2017,7 @@ select * from agg_view1;
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
- SELECT aggfns(v.a, v.b, v.c ORDER BY v.c USING ~<~ NULLS LAST) AS aggfns +
+ SELECT aggfns(a, b, c ORDER BY c USING ~<~ NULLS LAST) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
@@ -1985,14 +2256,14 @@ from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: sum is not an ordered-set aggregate, so it cannot have WITHIN GROUP
-LINE 1: select p, sum() within group (order by x::float8)
+LINE 1: select p, sum() within group (order by x::float8) -- error
^
select p, percentile_cont(p,p) -- error
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: WITHIN GROUP is required for ordered-set aggregate percentile_cont
-LINE 1: select p, percentile_cont(p,p)
+LINE 1: select p, percentile_cont(p,p) -- error
^
select percentile_cont(0.5) within group (order by b) from aggtest;
percentile_cont
@@ -2193,15 +2464,15 @@ select ten,
reset optimizer_trace_fallback;
-- end_ignore
select pg_get_viewdef('aggordview1');
- pg_get_viewdef
--------------------------------------------------------------------------------------------------------------------------------
- SELECT tenk1.ten, +
- percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY tenk1.thousand) AS p50, +
- percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY tenk1.thousand) FILTER (WHERE (tenk1.hundred = 1)) AS px,+
- rank(5, 'AZZZZ'::name, 50) WITHIN GROUP (ORDER BY tenk1.hundred, tenk1.string4 DESC, tenk1.hundred) AS rank +
- FROM tenk1 +
- GROUP BY tenk1.ten +
- ORDER BY tenk1.ten;
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------
+ SELECT ten, +
+ percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) AS p50, +
+ percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) FILTER (WHERE (hundred = 1)) AS px,+
+ rank(5, 'AZZZZ'::name, 50) WITHIN GROUP (ORDER BY hundred, string4 DESC, hundred) AS rank +
+ FROM tenk1 +
+ GROUP BY ten +
+ ORDER BY ten;
(1 row)
-- start_ignore
@@ -2276,6 +2547,76 @@ select string_agg(v, decode('ee', 'hex')) from bytea_test_table;
(1 row)
drop table bytea_test_table;
+-- Test parallel string_agg and array_agg
+create table pagg_test (x int, y int) with (autovacuum_enabled = off);
+insert into pagg_test
+select (case x % 4 when 1 then null else x end), x % 10
+from generate_series(1,5000) x;
+set parallel_setup_cost TO 0;
+set parallel_tuple_cost TO 0;
+set parallel_leader_participation TO 0;
+set min_parallel_table_scan_size = 0;
+set bytea_output = 'escape';
+set max_parallel_workers_per_gather = 2;
+-- create a view as we otherwise have to repeat this query a few times.
+create view v_pagg_test AS
+select
+ y,
+ min(t) AS tmin,max(t) AS tmax,count(distinct t) AS tndistinct,
+ min(b) AS bmin,max(b) AS bmax,count(distinct b) AS bndistinct,
+ min(a) AS amin,max(a) AS amax,count(distinct a) AS andistinct,
+ min(aa) AS aamin,max(aa) AS aamax,count(distinct aa) AS aandistinct
+from (
+ select
+ y,
+ unnest(regexp_split_to_array(a1.t, ','))::int AS t,
+ unnest(regexp_split_to_array(a1.b::text, ',')) AS b,
+ unnest(a1.a) AS a,
+ unnest(a1.aa) AS aa
+ from (
+ select
+ y,
+ string_agg(x::text, ',') AS t,
+ string_agg(x::text::bytea, ',') AS b,
+ array_agg(x) AS a,
+ array_agg(ARRAY[x]) AS aa
+ from pagg_test
+ group by y
+ ) a1
+) a2
+group by y;
+-- Ensure results are correct.
+-- Check that we don't fail on anonymous record types.
+set max_parallel_workers_per_gather = 2;
+explain (costs off)
+select array_dims(array_agg(s)) from (select * from pagg_test) s;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Whole-row variable
+ QUERY PLAN
+------------------------------------------------
+ Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on pagg_test
+ Optimizer: Postgres query optimizer
+(4 rows)
+
+select array_dims(array_agg(s)) from (select * from pagg_test) s;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Whole-row variable
+ array_dims
+------------
+ [1:5000]
+(1 row)
+
+-- Clean up
+reset max_parallel_workers_per_gather;
+reset bytea_output;
+reset min_parallel_table_scan_size;
+reset parallel_leader_participation;
+reset parallel_tuple_cost;
+reset parallel_setup_cost;
+drop view v_pagg_test;
+drop table pagg_test;
-- FILTER tests
select min(unique1) filter (where unique1 > 100) from tenk1;
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
@@ -2334,6 +2675,14 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
a
(1 row)
+select any_value(v) filter (where v > 2) from (values (1), (2), (3)) as v (v);
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Aggregate functions with FILTER
+ any_value
+-----------
+ 3
+(1 row)
+
-- outer reference in FILTER (PostgreSQL extension)
select (select count(*)
from (values (1)) t0(inner_c))
@@ -2464,14 +2813,14 @@ from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: sum is not an ordered-set aggregate, so it cannot have WITHIN GROUP
-LINE 1: select p, sum() within group (order by x::float8)
+LINE 1: select p, sum() within group (order by x::float8) -- error
^
select p, percentile_cont(p,p) -- error
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: WITHIN GROUP is required for ordered-set aggregate percentile_cont
-LINE 1: select p, percentile_cont(p,p)
+LINE 1: select p, percentile_cont(p,p) -- error
^
select percentile_cont(0.5) within group (order by b) from aggtest;
percentile_cont
@@ -2672,15 +3021,15 @@ select ten,
reset optimizer_trace_fallback;
-- end_ignore
select pg_get_viewdef('aggordview1');
- pg_get_viewdef
--------------------------------------------------------------------------------------------------------------------------------
- SELECT tenk1.ten, +
- percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY tenk1.thousand) AS p50, +
- percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY tenk1.thousand) FILTER (WHERE (tenk1.hundred = 1)) AS px,+
- rank(5, 'AZZZZ'::name, 50) WITHIN GROUP (ORDER BY tenk1.hundred, tenk1.string4 DESC, tenk1.hundred) AS rank +
- FROM tenk1 +
- GROUP BY tenk1.ten +
- ORDER BY tenk1.ten;
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------
+ SELECT ten, +
+ percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) AS p50, +
+ percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) FILTER (WHERE (hundred = 1)) AS px,+
+ rank(5, 'AZZZZ'::name, 50) WITHIN GROUP (ORDER BY hundred, string4 DESC, hundred) AS rank +
+ FROM tenk1 +
+ GROUP BY ten +
+ ORDER BY ten;
(1 row)
-- start_ignore
@@ -3067,6 +3416,43 @@ SELECT balk(hundred) FROM tenk1;
(1 row)
+ROLLBACK;
+-- test multiple usage of an aggregate whose finalfn returns a R/W datum
+BEGIN;
+CREATE FUNCTION rwagg_sfunc(x anyarray, y anyarray) RETURNS anyarray
+LANGUAGE plpgsql IMMUTABLE AS $$
+BEGIN
+ RETURN array_fill(y[1], ARRAY[4]);
+END;
+$$;
+CREATE FUNCTION rwagg_finalfunc(x anyarray) RETURNS anyarray
+LANGUAGE plpgsql STRICT IMMUTABLE AS $$
+DECLARE
+ res x%TYPE;
+BEGIN
+ -- assignment is essential for this test, it expands the array to R/W
+ res := array_fill(x[1], ARRAY[4]);
+ RETURN res;
+END;
+$$;
+CREATE AGGREGATE rwagg(anyarray) (
+ STYPE = anyarray,
+ SFUNC = rwagg_sfunc,
+ FINALFUNC = rwagg_finalfunc
+);
+CREATE FUNCTION eatarray(x real[]) RETURNS real[]
+LANGUAGE plpgsql STRICT IMMUTABLE AS $$
+BEGIN
+ x[1] := x[1] + 1;
+ RETURN x;
+END;
+$$;
+SELECT eatarray(rwagg(ARRAY[1.0::real])), eatarray(rwagg(ARRAY[1.0::real]));
+ eatarray | eatarray
+-----------+-----------
+ {2,1,1,1} | {2,1,1,1}
+(1 row)
+
ROLLBACK;
-- test coverage for aggregate combine/serial/deserial functions
BEGIN;
diff --git a/src/test/regress/expected/autostats.out b/src/test/regress/expected/autostats.out
index c0390066c39..2705b8e70cd 100644
--- a/src/test/regress/expected/autostats.out
+++ b/src/test/regress/expected/autostats.out
@@ -45,8 +45,8 @@ set role=autostats_nonowner;
LOG: statement: set role=autostats_nonowner;
insert into autostats_test select generate_series(1, 10);
LOG: statement: insert into autostats_test select generate_series(1, 10);
-ERROR: permission denied for table autostats_test
LOG: An exception was encountered during the execution of statement: insert into autostats_test select generate_series(1, 10);
+ERROR: permission denied for table autostats_test
select relname, reltuples from pg_class where relname='autostats_test';
LOG: statement: select relname, reltuples from pg_class where relname='autostats_test';
relname | reltuples
@@ -87,8 +87,8 @@ LOG: statement: select relname, reltuples from pg_class where relname='autostat
-- Try to disable allow_nonowner GUC as ordinary user, should fail
set gp_autostats_allow_nonowner=off;
LOG: statement: set gp_autostats_allow_nonowner=off;
-ERROR: permission denied to set parameter "gp_autostats_allow_nonowner"
LOG: An exception was encountered during the execution of statement: set gp_autostats_allow_nonowner=off;
+ERROR: permission denied to set parameter "gp_autostats_allow_nonowner"
show gp_autostats_allow_nonowner;
LOG: statement: show gp_autostats_allow_nonowner;
gp_autostats_allow_nonowner
diff --git a/src/test/regress/expected/bfv_aggregate_optimizer.out b/src/test/regress/expected/bfv_aggregate_optimizer.out
index 47ae7048795..8a516cf9c30 100644
--- a/src/test/regress/expected/bfv_aggregate_optimizer.out
+++ b/src/test/regress/expected/bfv_aggregate_optimizer.out
@@ -1588,18 +1588,16 @@ select array_agg(a order by b desc nulls last) from aggordertest;
create temp table mpp14125 as select repeat('a', a) a, a % 10 b from generate_series(1, 100)a;
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entry.
explain select string_agg(a, '') from mpp14125 group by b;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------
- Gather Motion 3:1 (slice2; segments: 3) (cost=0.00..431.07 rows=10 width=8)
- -> GroupAggregate (cost=0.00..431.07 rows=4 width=8)
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..431.00 rows=1 width=8)
+ -> HashAggregate (cost=0.00..431.00 rows=1 width=8)
Group Key: b
- -> Sort (cost=0.00..431.06 rows=34 width=55)
- Sort Key: b
- -> Redistribute Motion 3:3 (slice1; segments: 3) (cost=0.00..431.01 rows=34 width=55)
- Hash Key: b
- -> Seq Scan on mpp14125 (cost=0.00..431.00 rows=34 width=55)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.83.0
-(9 rows)
+ -> Redistribute Motion 3:3 (slice2; segments: 3) (cost=0.00..431.00 rows=1 width=12)
+ Hash Key: b
+ -> Seq Scan on mpp14125 (cost=0.00..431.00 rows=1 width=12)
+ Optimizer: GPORCA
+(7 rows)
-- end MPP-14125
-- Test that integer AVG() aggregate is accurate with large values. We used to
diff --git a/src/test/regress/expected/bfv_cte_optimizer.out b/src/test/regress/expected/bfv_cte_optimizer.out
index ec2ee57605f..211a7838d4d 100644
--- a/src/test/regress/expected/bfv_cte_optimizer.out
+++ b/src/test/regress/expected/bfv_cte_optimizer.out
@@ -512,6 +512,9 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
-- end_matchsubs
-- Filter out irrelevant LOG messages from segments other than seg2.
\! cat /tmp/bfv_cte.out | grep -P '^(?!LOG)|^(LOG.*seg2)' | grep -vP 'LOG.*fault|decreased xslice state refcount'
+SET
+SET
+SET
LOG: SISC (shareid=0, slice=2): initialized xslice state (seg2 slice2 127.0.1.1:7004 pid=1048240)
LOG: SISC READER (shareid=0, slice=2): wrote notify_done (seg2 slice2 127.0.1.1:7004 pid=1048240)
LOG: SISC READER (shareid=0, slice=4): wrote notify_done (seg2 slice4 127.0.1.1:7004 pid=1048252)
@@ -519,6 +522,7 @@ LOG: SISC WRITER (shareid=0, slice=1): No tuplestore yet, creating tuplestore
LOG: SISC WRITER (shareid=0, slice=1): wrote notify_ready (seg2 slice1 127.0.1.1:7004 pid=1048234)
LOG: SISC WRITER (shareid=0, slice=1): got DONE message from 2 readers (seg2 slice1 127.0.1.1:7004 pid=1048234)
LOG: SISC (shareid=0, slice=1): removed xslice state (seg2 slice1 127.0.1.1:7004 pid=1048234)
+SET
a | a
---+---
1 | 2
diff --git a/src/test/regress/expected/bfv_dml_optimizer.out b/src/test/regress/expected/bfv_dml_optimizer.out
index 9304d84c494..ebae956f180 100644
--- a/src/test/regress/expected/bfv_dml_optimizer.out
+++ b/src/test/regress/expected/bfv_dml_optimizer.out
@@ -208,7 +208,7 @@ explain update update_pk_test set a = 5;
Sort Key: (DMLAction)
-> Redistribute Motion 3:3 (slice1; segments: 3) (cost=0.00..431.00 rows=2 width=22)
Hash Key: a
- -> Split (cost=0.00..431.00 rows=1 width=22)
+ -> Split Update (cost=0.00..431.00 rows=1 width=22)
-> Seq Scan on update_pk_test (cost=0.00..431.00 rows=1 width=18)
Optimizer: Pivotal Optimizer (GPORCA)
(11 rows)
diff --git a/src/test/regress/expected/bfv_joins_optimizer.out b/src/test/regress/expected/bfv_joins_optimizer.out
index cccfe3db059..37ca6b731bb 100644
--- a/src/test/regress/expected/bfv_joins_optimizer.out
+++ b/src/test/regress/expected/bfv_joins_optimizer.out
@@ -3526,6 +3526,7 @@ explain (costs off) select * from b, lateral (select * from a, c where b.i = a.i
---------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
-> Nested Loop
+ Join Filter: ((a.i + b.i) = c.j)
-> Broadcast Motion 3:3 (slice2; segments: 3)
-> Seq Scan on b
-> Materialize
@@ -3537,7 +3538,7 @@ explain (costs off) select * from b, lateral (select * from a, c where b.i = a.i
-> Index Only Scan using c_j_i_idx on c
Index Cond: (j = (a.i + b.i))
Optimizer: Postgres query optimizer
-(13 rows)
+(14 rows)
select * from b, lateral (select * from a, c where b.i = a.i and (a.i + b.i) = c.j) as ac;
i | i | i | j
diff --git a/src/test/regress/expected/bitmap_index_optimizer.out b/src/test/regress/expected/bitmap_index_optimizer.out
index 96225dcf946..7363066464f 100644
--- a/src/test/regress/expected/bitmap_index_optimizer.out
+++ b/src/test/regress/expected/bitmap_index_optimizer.out
@@ -722,6 +722,12 @@ WARNING: consider disabling FTS probes while injecting a panic.
SET client_min_messages='ERROR';
CREATE TABLE trigger_recovery_on_primaries(c int);
RESET client_min_messages;
+SELECT pg_sleep(2);
+ pg_sleep
+----------
+
+(1 row)
+
-- reconnect to the database after restart
\c
SELECT gp_inject_fault('checkpoint', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content > -1;
@@ -732,6 +738,12 @@ SELECT gp_inject_fault('checkpoint', 'reset', dbid) FROM gp_segment_configuratio
Success:
(3 rows)
+SELECT pg_sleep(2);
+ pg_sleep
+----------
+
+(1 row)
+
SELECT gp_inject_fault('finish_prepared_after_record_commit_prepared', 'reset', dbid) FROM gp_segment_configuration WHERE role = 'p' AND content > -1;
gp_inject_fault
-----------------
diff --git a/src/test/regress/expected/box_optimizer.out b/src/test/regress/expected/box_optimizer.out
index b34251ce789..5c2fe130a47 100644
--- a/src/test/regress/expected/box_optimizer.out
+++ b/src/test/regress/expected/box_optimizer.out
@@ -675,3 +675,28 @@ WHERE seq.id IS NULL OR idx.id IS NULL;
RESET enable_seqscan;
RESET enable_indexscan;
RESET enable_bitmapscan;
+-- test non-error-throwing API for some core types
+SELECT pg_input_is_valid('200', 'box');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT * FROM pg_input_error_info('200', 'box');
+ message | detail | hint | sql_error_code
+------------------------------------------+--------+------+----------------
+ invalid input syntax for type box: "200" | | | 22P02
+(1 row)
+
+SELECT pg_input_is_valid('((200,300),(500, xyz))', 'box');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT * FROM pg_input_error_info('((200,300),(500, xyz))', 'box');
+ message | detail | hint | sql_error_code
+-------------------------------------------------------------+--------+------+----------------
+ invalid input syntax for type box: "((200,300),(500, xyz))" | | | 22P02
+(1 row)
+
diff --git a/src/test/regress/expected/brin_multi_optimizer_1.out b/src/test/regress/expected/brin_multi_optimizer_1.out
index d995e7888d2..06b3ccfce39 100644
--- a/src/test/regress/expected/brin_multi_optimizer_1.out
+++ b/src/test/regress/expected/brin_multi_optimizer_1.out
@@ -29,7 +29,7 @@ INSERT INTO brintest_multi SELECT
(four + 1.0)/(hundred+1),
odd::float8 / (tenthous + 1),
format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr,
- substr(md5(unique1::text), 1, 16)::macaddr8,
+ substr(fipshash(unique1::text), 1, 16)::macaddr8,
inet '10.2.3.4/24' + tenthous,
cidr '10.2.3/24' + tenthous,
date '1995-08-15' + tenthous,
@@ -179,7 +179,7 @@ INSERT INTO brinopers_multi VALUES
('macaddr8col', 'macaddr8',
'{>, >=, =, <=, <}',
'{b1:d1:0e:7b:af:a4:42:12, d9:35:91:bd:f7:86:0e:1e, 72:8f:20:6c:2a:01:bf:57, 23:e8:46:63:86:07:ad:cb, 13:16:8e:6a:2e:6c:84:b4}',
- '{33, 15, 1, 13, 6}'),
+ '{31, 17, 1, 11, 4}'),
('inetcol', 'inet',
'{=, <, <=, >, >=}',
'{10.2.14.231/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}',
@@ -322,11 +322,11 @@ WARNING: did not get bitmap indexscan plan for (oidcol,>=,oid,0,100)
WARNING: did not get seqscan plan for (oidcol,=,oid,8800,1)
WARNING: did not get bitmap indexscan plan for (oidcol,<=,oid,9999,100)
WARNING: did not get bitmap indexscan plan for (oidcol,<,oid,9999,100)
-WARNING: did not get bitmap indexscan plan for (macaddr8col,>,macaddr8,b1:d1:0e:7b:af:a4:42:12,33)
-WARNING: did not get bitmap indexscan plan for (macaddr8col,>=,macaddr8,d9:35:91:bd:f7:86:0e:1e,15)
+WARNING: did not get bitmap indexscan plan for (macaddr8col,<,macaddr8,13:16:8e:6a:2e:6c:84:b4,4)
+WARNING: did not get bitmap indexscan plan for (macaddr8col,<=,macaddr8,23:e8:46:63:86:07:ad:cb,11)
WARNING: did not get bitmap indexscan plan for (macaddr8col,=,macaddr8,72:8f:20:6c:2a:01:bf:57,1)
-WARNING: did not get bitmap indexscan plan for (macaddr8col,<=,macaddr8,23:e8:46:63:86:07:ad:cb,13)
-WARNING: did not get bitmap indexscan plan for (macaddr8col,<,macaddr8,13:16:8e:6a:2e:6c:84:b4,6)
+WARNING: did not get bitmap indexscan plan for (macaddr8col,>,macaddr8,b1:d1:0e:7b:af:a4:42:12,31)
+WARNING: did not get bitmap indexscan plan for (macaddr8col,>=,macaddr8,d9:35:91:bd:f7:86:0e:1e,17)
WARNING: did not get seqscan plan for (cidrcol,=,inet,10.2.14/24,2)
WARNING: did not get bitmap indexscan plan for (cidrcol,<,inet,255.255.255.255,100)
WARNING: did not get bitmap indexscan plan for (cidrcol,<=,inet,255.255.255.255,100)
@@ -478,7 +478,7 @@ INSERT INTO brintest_multi SELECT
(four + 1.0)/(hundred+1),
odd::float8 / (tenthous + 1),
format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr,
- substr(md5(unique1::text), 1, 16)::macaddr8,
+ substr(fipshash(unique1::text), 1, 16)::macaddr8,
inet '10.2.3.4' + tenthous,
cidr '10.2.3/24' + tenthous,
date '1995-08-15' + tenthous,
@@ -502,6 +502,12 @@ VACUUM brintest_multi; -- force a summarization cycle in brinidx
insert into public.brintest_multi (float4col) values (real 'nan');
insert into public.brintest_multi (float8col) values (real 'nan');
UPDATE brintest_multi SET int8col = int8col * int4col;
+-- Test handling of inet netmasks with inet_minmax_multi_ops
+CREATE TABLE brin_test_inet (a inet);
+CREATE INDEX ON brin_test_inet USING brin (a inet_minmax_multi_ops);
+INSERT INTO brin_test_inet VALUES ('127.0.0.1/0');
+INSERT INTO brin_test_inet VALUES ('0.0.0.0/12');
+DROP TABLE brin_test_inet;
-- Tests for brin_summarize_new_values
SELECT brin_summarize_new_values('brintest_multi'); -- error, not an index
ERROR: "brintest_multi" is not an index
@@ -624,3 +630,138 @@ EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE b = 1;
Optimizer: Pivotal Optimizer (GPORCA)
(6 rows)
+-- test overflows during CREATE INDEX with extreme timestamp values
+CREATE TABLE brin_timestamp_test(a TIMESTAMPTZ);
+SET datestyle TO iso;
+-- values close to timetamp minimum
+INSERT INTO brin_timestamp_test
+SELECT '4713-01-01 00:00:01 BC'::timestamptz + (i || ' seconds')::interval
+ FROM generate_series(1,30) s(i);
+-- values close to timetamp maximum
+INSERT INTO brin_timestamp_test
+SELECT '294276-12-01 00:00:01'::timestamptz + (i || ' seconds')::interval
+ FROM generate_series(1,30) s(i);
+CREATE INDEX ON brin_timestamp_test USING brin (a timestamptz_minmax_multi_ops) WITH (pages_per_range=1);
+DROP TABLE brin_timestamp_test;
+-- test overflows during CREATE INDEX with extreme date values
+CREATE TABLE brin_date_test(a DATE);
+-- insert values close to date minimum
+INSERT INTO brin_date_test SELECT '4713-01-01 BC'::date + i FROM generate_series(1, 30) s(i);
+-- insert values close to date minimum
+INSERT INTO brin_date_test SELECT '5874897-12-01'::date + i FROM generate_series(1, 30) s(i);
+CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1);
+SET enable_seqscan = off;
+-- make sure the ranges were built correctly and 2023-01-01 eliminates all
+EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
+SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date;
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1) (actual rows=0 loops=1)
+ -> Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1)
+ Recheck Cond: (a = '2023-01-01'::date)
+ -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1)
+ Index Cond: (a = '2023-01-01'::date)
+ Optimizer: Pivotal Optimizer (GPORCA)
+(6 rows)
+
+DROP TABLE brin_date_test;
+RESET enable_seqscan;
+-- test handling of infinite timestamp values
+CREATE TABLE brin_timestamp_test(a TIMESTAMP);
+INSERT INTO brin_timestamp_test VALUES ('-infinity'), ('infinity');
+INSERT INTO brin_timestamp_test
+SELECT i FROM generate_series('2000-01-01'::timestamp, '2000-02-09'::timestamp, '1 day'::interval) s(i);
+CREATE INDEX ON brin_timestamp_test USING brin (a timestamp_minmax_multi_ops) WITH (pages_per_range=1);
+SET enable_seqscan = off;
+EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
+SELECT * FROM brin_timestamp_test WHERE a = '2023-01-01'::timestamp;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1) (actual rows=0 loops=1)
+ -> Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1)
+ Recheck Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone)
+ -> Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1)
+ Index Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone)
+ Optimizer: Pivotal Optimizer (GPORCA)
+(6 rows)
+
+EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
+SELECT * FROM brin_timestamp_test WHERE a = '1900-01-01'::timestamp;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1) (actual rows=0 loops=1)
+ -> Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1)
+ Recheck Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone)
+ -> Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1)
+ Index Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone)
+ Optimizer: Pivotal Optimizer (GPORCA)
+(6 rows)
+
+DROP TABLE brin_timestamp_test;
+RESET enable_seqscan;
+-- test handling of infinite date values
+CREATE TABLE brin_date_test(a DATE);
+INSERT INTO brin_date_test VALUES ('-infinity'), ('infinity');
+INSERT INTO brin_date_test SELECT '2000-01-01'::date + i FROM generate_series(1, 40) s(i);
+CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1);
+SET enable_seqscan = off;
+EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
+SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date;
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1) (actual rows=0 loops=1)
+ -> Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1)
+ Recheck Cond: (a = '2023-01-01'::date)
+ -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1)
+ Index Cond: (a = '2023-01-01'::date)
+ Optimizer: Pivotal Optimizer (GPORCA)
+(6 rows)
+
+EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
+SELECT * FROM brin_date_test WHERE a = '1900-01-01'::date;
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1) (actual rows=0 loops=1)
+ -> Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1)
+ Recheck Cond: (a = '1900-01-01'::date)
+ -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1)
+ Index Cond: (a = '1900-01-01'::date)
+ Optimizer: Pivotal Optimizer (GPORCA)
+(6 rows)
+
+DROP TABLE brin_date_test;
+RESET enable_seqscan;
+RESET datestyle;
+-- test handling of overflow for interval values
+CREATE TABLE brin_interval_test(a INTERVAL);
+INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series(-178000000, -177999980) s(i);
+INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series( 177999980, 178000000) s(i);
+CREATE INDEX ON brin_interval_test USING brin (a interval_minmax_multi_ops) WITH (pages_per_range=1);
+SET enable_seqscan = off;
+EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
+SELECT * FROM brin_interval_test WHERE a = '-30 years'::interval;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1) (actual rows=0 loops=1)
+ -> Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1)
+ Recheck Cond: (a = '@ 30 years ago'::interval)
+ -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1)
+ Index Cond: (a = '@ 30 years ago'::interval)
+ Optimizer: Pivotal Optimizer (GPORCA)
+(6 rows)
+
+EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
+SELECT * FROM brin_interval_test WHERE a = '30 years'::interval;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1) (actual rows=0 loops=1)
+ -> Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1)
+ Recheck Cond: (a = '@ 30 years'::interval)
+ -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1)
+ Index Cond: (a = '@ 30 years'::interval)
+ Optimizer: Pivotal Optimizer (GPORCA)
+(6 rows)
+
+DROP TABLE brin_interval_test;
+RESET enable_seqscan;
+RESET datestyle;
diff --git a/src/test/regress/expected/brin_optimizer.out b/src/test/regress/expected/brin_optimizer.out
index 37d3d21fb10..28fb719175b 100644
--- a/src/test/regress/expected/brin_optimizer.out
+++ b/src/test/regress/expected/brin_optimizer.out
@@ -589,7 +589,7 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-- long random strings (~2000 chars each, so ~6kB for min/max on two
-- columns) to trigger toasting
-WITH rand_value AS (SELECT string_agg(md5(i::text),'') AS val FROM generate_series(1,60) s(i))
+WITH rand_value AS (SELECT string_agg(fipshash(i::text),'') AS val FROM generate_series(1,60) s(i))
INSERT INTO brintest_3
SELECT val, val, val, val FROM rand_value;
CREATE INDEX brin_test_toast_idx ON brintest_3 USING brin (b, c);
@@ -609,7 +609,7 @@ VACUUM brintest_3;
-- retry insert with a different random-looking (but deterministic) value
-- the value is different, and so should replace either min or max in the
-- brin summary
-WITH rand_value AS (SELECT string_agg(md5((-i)::text),'') AS val FROM generate_series(1,60) s(i))
+WITH rand_value AS (SELECT string_agg(fipshash((-i)::text),'') AS val FROM generate_series(1,60) s(i))
INSERT INTO brintest_3
SELECT val, val, val, val FROM rand_value;
-- now try some queries, accessing the brin index
@@ -633,3 +633,8 @@ SELECT * FROM brintest_3 WHERE b < '0';
DROP TABLE brintest_3;
RESET enable_seqscan;
+-- test an unlogged table, mostly to get coverage of brinbuildempty
+CREATE UNLOGGED TABLE brintest_unlogged (n numrange);
+CREATE INDEX brinidx_unlogged ON brintest_unlogged USING brin (n);
+INSERT INTO brintest_unlogged VALUES (numrange(0, 2^1000::numeric));
+DROP TABLE brintest_unlogged;
diff --git a/src/test/regress/expected/catcache_optimizer.out b/src/test/regress/expected/catcache_optimizer.out
index 9358b274691..4127d9be8ab 100644
--- a/src/test/regress/expected/catcache_optimizer.out
+++ b/src/test/regress/expected/catcache_optimizer.out
@@ -53,7 +53,7 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
-> Result (cost=0.00..0.01 rows=1 width=32)
-> Result (cost=0.00..0.01 rows=1 width=32)
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3) (cost=0.00..2.12 rows=4 width=36)
- -> Split (cost=0.00..2.04 rows=4 width=36)
+ -> Split Update (cost=0.00..2.04 rows=4 width=36)
-> Append (cost=0.00..2.04 rows=2 width=36)
-> Seq Scan on dml_14027_union_s_1_prt_2 dml_14027_union_s_1 (cost=0.00..1.02 rows=1 width=36)
-> Seq Scan on dml_14027_union_s_1_prt_def dml_14027_union_s_2 (cost=0.00..1.02 rows=1 width=36)
diff --git a/src/test/regress/expected/create_am_optimizer.out b/src/test/regress/expected/create_am_optimizer.out
index 8bcb572c331..6942937790c 100644
--- a/src/test/regress/expected/create_am_optimizer.out
+++ b/src/test/regress/expected/create_am_optimizer.out
@@ -253,6 +253,35 @@ SELECT amname FROM pg_class c, pg_am am
heap
(1 row)
+-- Switching to heap2 adds new dependency entry to the AM.
+ALTER TABLE heaptable SET ACCESS METHOD heap2;
+SELECT pg_describe_object(classid, objid, objsubid) as obj,
+ pg_describe_object(refclassid, refobjid, refobjsubid) as objref,
+ deptype
+ FROM pg_depend
+ WHERE classid = 'pg_class'::regclass AND
+ objid = 'heaptable'::regclass
+ ORDER BY 1, 2;
+ obj | objref | deptype
+-----------------+---------------------+---------
+ table heaptable | access method heap2 | n
+ table heaptable | schema public | n
+(2 rows)
+
+-- Switching to heap should not have a dependency entry to the AM.
+ALTER TABLE heaptable SET ACCESS METHOD heap;
+SELECT pg_describe_object(classid, objid, objsubid) as obj,
+ pg_describe_object(refclassid, refobjid, refobjsubid) as objref,
+ deptype
+ FROM pg_depend
+ WHERE classid = 'pg_class'::regclass AND
+ objid = 'heaptable'::regclass
+ ORDER BY 1, 2;
+ obj | objref | deptype
+-----------------+---------------+---------
+ table heaptable | schema public | n
+(1 row)
+
ALTER TABLE heaptable SET ACCESS METHOD heap2;
SELECT amname FROM pg_class c, pg_am am
WHERE c.relam = am.oid AND c.oid = 'heaptable'::regclass;
@@ -267,9 +296,35 @@ SELECT COUNT(a), COUNT(1) FILTER(WHERE a=1) FROM heaptable;
9 | 1
(1 row)
+-- ALTER MATERIALIZED VIEW SET ACCESS METHOD
+CREATE MATERIALIZED VIEW heapmv USING heap AS SELECT * FROM heaptable;
+SELECT amname FROM pg_class c, pg_am am
+ WHERE c.relam = am.oid AND c.oid = 'heapmv'::regclass;
+ amname
+--------
+ heap
+(1 row)
+
+ALTER MATERIALIZED VIEW heapmv SET ACCESS METHOD heap2;
+SELECT amname FROM pg_class c, pg_am am
+ WHERE c.relam = am.oid AND c.oid = 'heapmv'::regclass;
+ amname
+--------
+ heap2
+(1 row)
+
+SELECT COUNT(a), COUNT(1) FILTER(WHERE a=1) FROM heapmv;
+ count | count
+-------+-------
+ 9 | 1
+(1 row)
+
-- No support for multiple subcommands
ALTER TABLE heaptable SET ACCESS METHOD heap, SET ACCESS METHOD heap2;
ERROR: cannot have multiple SET ACCESS METHOD subcommands
+ALTER MATERIALIZED VIEW heapmv SET ACCESS METHOD heap, SET ACCESS METHOD heap2;
+ERROR: cannot have multiple SET ACCESS METHOD subcommands
+DROP MATERIALIZED VIEW heapmv;
DROP TABLE heaptable;
-- No support for partitioned tables.
CREATE TABLE am_partitioned(x INT, y INT)
diff --git a/src/test/regress/expected/create_function_sql.out b/src/test/regress/expected/create_function_sql.out
index 5cae4ecbc1e..f789998955c 100644
--- a/src/test/regress/expected/create_function_sql.out
+++ b/src/test/regress/expected/create_function_sql.out
@@ -581,14 +581,13 @@ SELECT * FROM functest_sri1();
(3 rows)
EXPLAIN (verbose, costs off) SELECT * FROM functest_sri1();
- QUERY PLAN
---------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- Output: functest3.a
- -> Seq Scan on temp_func_test.functest3
- Output: functest3.a
- Optimizer: Postgres query optimizer
-(5 rows)
+ QUERY PLAN
+-----------------------------------------------
+ Function Scan on temp_func_test.functest_sri1
+ Output: functest_sri1
+ Function Call: functest_sri1()
+ Optimizer: GPORCA
+(4 rows)
CREATE FUNCTION functest_sri2() RETURNS SETOF int
LANGUAGE SQL
@@ -605,14 +604,13 @@ SELECT * FROM functest_sri2();
(3 rows)
EXPLAIN (verbose, costs off) SELECT * FROM functest_sri2();
- QUERY PLAN
---------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- Output: functest3.a
- -> Seq Scan on temp_func_test.functest3
- Output: functest3.a
- Optimizer: Postgres query optimizer
-(5 rows)
+ QUERY PLAN
+-----------------------------------------------
+ Function Scan on temp_func_test.functest_sri2
+ Output: functest_sri2
+ Function Call: functest_sri2()
+ Optimizer: GPORCA
+(4 rows)
DROP TABLE functest3 CASCADE;
NOTICE: drop cascades to function functest_sri2()
diff --git a/src/test/regress/expected/create_index_optimizer.out b/src/test/regress/expected/create_index_optimizer.out
index f661e53f487..6c4781ae5d4 100644
--- a/src/test/regress/expected/create_index_optimizer.out
+++ b/src/test/regress/expected/create_index_optimizer.out
@@ -2,6 +2,8 @@
-- CREATE_INDEX
-- Create ancillary data structures (i.e. indices)
--
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
--
-- BTREE
--
@@ -31,18 +33,6 @@ ERROR: relation "six_wrong" does not exist
COMMENT ON INDEX six IS 'good index';
COMMENT ON INDEX six IS NULL;
--
--- BTREE ascending/descending cases
---
--- we load int4/text from pure descending data (each key is a new
--- low key) and name/f8 from pure ascending data (each key is a new
--- high key). we had a bug where new low keys would sometimes be
--- "lost".
---
-CREATE INDEX bt_i4_index ON bt_i4_heap USING btree (seqno int4_ops);
-CREATE INDEX bt_name_index ON bt_name_heap USING btree (seqno name_ops);
-CREATE INDEX bt_txt_index ON bt_txt_heap USING btree (seqno text_ops);
-CREATE INDEX bt_f8_index ON bt_f8_heap USING btree (seqno float8_ops);
---
-- BTREE partial indices
--
CREATE INDEX onek2_u1_prtl ON onek2 USING btree(unique1 int4_ops)
@@ -54,9 +44,20 @@ CREATE INDEX onek2_stu1_prtl ON onek2 USING btree(stringu1 name_ops)
--
-- GiST (rtree-equivalent opclasses only)
--
+CREATE TABLE slow_emp4000 (
+ home_base box
+);
+CREATE TABLE fast_emp4000 (
+ home_base box
+);
+\set filename :abs_srcdir '/data/rect.data'
+COPY slow_emp4000 FROM :'filename';
+INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000;
+ANALYZE slow_emp4000;
+ANALYZE fast_emp4000;
CREATE INDEX grect2ind ON fast_emp4000 USING gist (home_base);
-CREATE INDEX gpolygonind ON polygon_tbl USING gist (f1);
-CREATE INDEX gcircleind ON circle_tbl USING gist (f1);
+-- we want to work with a point_tbl that includes a null
+CREATE TEMP TABLE point_tbl AS SELECT * FROM public.point_tbl;
INSERT INTO POINT_TBL(f1) VALUES (NULL);
ANALYZE POINT_TBL;
CREATE INDEX gpointind ON point_tbl USING gist (f1);
@@ -99,23 +100,6 @@ SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
278
(1 row)
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
- ORDER BY (poly_center(f1))[0];
- f1
----------------------
- ((2,0),(2,4),(0,0))
-(1 row)
-
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
- ORDER BY area(f1);
- f1
----------------
- <(1,2),3>
- <(1,3),5>
- <(1,2),100>
- <(100,1),115>
-(4 rows)
-
SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon;
count
-------
@@ -187,6 +171,7 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)';
CREATE VIEW point_tblv AS
SELECT * FROM point_tbl
WHERE NOT f1 ~= '(1e-300, -1e-300)' AND (f1 <-> '(0,0)') != 'inf';
+NOTICE: view "point_tblv" will be a temporary view
-- In gpdb, we intentional filter out point (1e-300, -1e-300) and `inf` every order by related queries
-- in this test case file. It is an underflow point, rank it cause randomly results( (0,0),
-- (1e-300, -1e-300) are equal).
@@ -323,55 +308,6 @@ SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
278
(1 row)
-EXPLAIN (COSTS OFF)
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
- ORDER BY (poly_center(f1))[0];
- QUERY PLAN
------------------------------------------------------------------------
- Result
- -> Gather Motion 3:1 (slice1; segments: 3)
- Merge Key: ((poly_center(f1))[0])
- -> Sort
- Sort Key: ((poly_center(f1))[0])
- -> Index Scan using gpolygonind on polygon_tbl
- Index Cond: (f1 @> '((1,1),(2,2),(2,1))'::polygon)
- Filter: (f1 @> '((1,1),(2,2),(2,1))'::polygon)
- Optimizer: Pivotal Optimizer (GPORCA)
-(9 rows)
-
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
- ORDER BY (poly_center(f1))[0];
- f1
----------------------
- ((2,0),(2,4),(0,0))
-(1 row)
-
-EXPLAIN (COSTS OFF)
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
- ORDER BY area(f1);
- QUERY PLAN
---------------------------------------------------------------
- Result
- -> Gather Motion 3:1 (slice1; segments: 3)
- Merge Key: (area(f1))
- -> Sort
- Sort Key: (area(f1))
- -> Index Scan using gcircleind on circle_tbl
- Index Cond: (f1 && '<(1,-2),1>'::circle)
- Filter: (f1 && '<(1,-2),1>'::circle)
- Optimizer: Pivotal Optimizer (GPORCA)
-(9 rows)
-
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
- ORDER BY area(f1);
- f1
----------------
- <(1,2),3>
- <(1,3),5>
- <(1,2),100>
- <(100,1),115>
-(4 rows)
-
EXPLAIN (COSTS OFF)
SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon;
QUERY PLAN
@@ -796,6 +732,36 @@ RESET enable_bitmapscan;
--
-- Note: GIN currently supports only bitmap scans, not plain indexscans
--
+CREATE TABLE array_index_op_test (
+ seqno int4,
+ i int4[],
+ t text[]
+);
+\set filename :abs_srcdir '/data/array.data'
+COPY array_index_op_test FROM :'filename';
+ANALYZE array_index_op_test;
+SELECT * FROM array_index_op_test WHERE i = '{NULL}' ORDER BY seqno;
+ seqno | i | t
+-------+--------+--------
+ 102 | {NULL} | {NULL}
+(1 row)
+
+SELECT * FROM array_index_op_test WHERE i @> '{NULL}' ORDER BY seqno;
+ seqno | i | t
+-------+---+---
+(0 rows)
+
+SELECT * FROM array_index_op_test WHERE i && '{NULL}' ORDER BY seqno;
+ seqno | i | t
+-------+---+---
+(0 rows)
+
+SELECT * FROM array_index_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
+ seqno | i | t
+-------+----+----
+ 101 | {} | {}
+(1 row)
+
SET enable_seqscan = OFF;
SET enable_indexscan = OFF;
SET enable_bitmapscan = ON;
@@ -1026,28 +992,6 @@ SELECT * FROM array_index_op_test WHERE i <@ '{}' ORDER BY seqno;
101 | {} | {}
(1 row)
-SELECT * FROM array_op_test WHERE i = '{NULL}' ORDER BY seqno;
- seqno | i | t
--------+--------+--------
- 102 | {NULL} | {NULL}
-(1 row)
-
-SELECT * FROM array_op_test WHERE i @> '{NULL}' ORDER BY seqno;
- seqno | i | t
--------+---+---
-(0 rows)
-
-SELECT * FROM array_op_test WHERE i && '{NULL}' ORDER BY seqno;
- seqno | i | t
--------+---+---
-(0 rows)
-
-SELECT * FROM array_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
- seqno | i | t
--------+----+----
- 101 | {} | {}
-(1 row)
-
CREATE INDEX textarrayidx ON array_index_op_test USING gin (t);
explain (costs off)
SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAAA72908}' ORDER BY seqno;
@@ -1320,18 +1264,6 @@ SELECT * FROM array_index_op_test WHERE t = '{}' ORDER BY seqno;
101 | {} | {}
(1 row)
-SELECT * FROM array_op_test WHERE i = '{NULL}' ORDER BY seqno;
- seqno | i | t
--------+--------+--------
- 102 | {NULL} | {NULL}
-(1 row)
-
-SELECT * FROM array_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
- seqno | i | t
--------+----+----
- 101 | {} | {}
-(1 row)
-
RESET enable_seqscan;
RESET enable_indexscan;
RESET enable_bitmapscan;
@@ -1365,10 +1297,6 @@ Options: fastupdate=on, gin_pending_list_limit=128
--
-- HASH
--
-CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops);
-CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops);
-CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops);
-CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops) WITH (fillfactor=60);
CREATE UNLOGGED TABLE unlogged_hash_table (id int4);
CREATE INDEX unlogged_hash_index ON unlogged_hash_table USING hash (id int4_ops);
DROP TABLE unlogged_hash_table;
@@ -1398,6 +1326,67 @@ SELECT count(*) FROM tenk1 WHERE stringu1 = 'TVAAAA';
DROP INDEX hash_tuplesort_idx;
RESET maintenance_work_mem;
--
+-- Test unique null behavior
+--
+CREATE TABLE unique_tbl (i int, t text);
+CREATE UNIQUE INDEX unique_idx1 ON unique_tbl (i) NULLS DISTINCT;
+CREATE UNIQUE INDEX unique_idx2 ON unique_tbl (i) NULLS NOT DISTINCT;
+INSERT INTO unique_tbl VALUES (1, 'one');
+INSERT INTO unique_tbl VALUES (2, 'two');
+INSERT INTO unique_tbl VALUES (3, 'three');
+INSERT INTO unique_tbl VALUES (4, 'four');
+INSERT INTO unique_tbl VALUES (5, 'one');
+INSERT INTO unique_tbl (t) VALUES ('six');
+INSERT INTO unique_tbl (t) VALUES ('seven'); -- error from unique_idx2
+DETAIL: Key (i)=(null) already exists.
+ERROR: duplicate key value violates unique constraint "unique_idx2"
+DROP INDEX unique_idx1, unique_idx2;
+INSERT INTO unique_tbl (t) VALUES ('seven');
+-- build indexes on filled table
+CREATE UNIQUE INDEX unique_idx3 ON unique_tbl (i) NULLS DISTINCT; -- ok
+CREATE UNIQUE INDEX unique_idx4 ON unique_tbl (i) NULLS NOT DISTINCT; -- error
+DETAIL: Key (i)=(null) is duplicated.
+ERROR: could not create unique index "unique_idx4"
+DELETE FROM unique_tbl WHERE t = 'seven';
+CREATE UNIQUE INDEX unique_idx4 ON unique_tbl (i) NULLS NOT DISTINCT; -- ok now
+\d unique_tbl
+ Table "public.unique_tbl"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ i | integer | | |
+ t | text | | |
+Indexes:
+ "unique_idx3" UNIQUE, btree (i)
+ "unique_idx4" UNIQUE, btree (i) NULLS NOT DISTINCT
+
+\d unique_idx3
+ Index "public.unique_idx3"
+ Column | Type | Key? | Definition
+--------+---------+------+------------
+ i | integer | yes | i
+unique, btree, for table "public.unique_tbl"
+
+\d unique_idx4
+ Index "public.unique_idx4"
+ Column | Type | Key? | Definition
+--------+---------+------+------------
+ i | integer | yes | i
+unique nulls not distinct, btree, for table "public.unique_tbl"
+
+SELECT pg_get_indexdef('unique_idx3'::regclass);
+ pg_get_indexdef
+----------------------------------------------------------------------
+ CREATE UNIQUE INDEX unique_idx3 ON public.unique_tbl USING btree (i)
+(1 row)
+
+SELECT pg_get_indexdef('unique_idx4'::regclass);
+ pg_get_indexdef
+-----------------------------------------------------------------------------------------
+ CREATE UNIQUE INDEX unique_idx4 ON public.unique_tbl USING btree (i) NULLS NOT DISTINCT
+(1 row)
+
+DROP TABLE unique_tbl;
+--
-- Test functional index
--
CREATE TABLE func_index_heap (f1 text, f2 text);
@@ -1481,13 +1470,6 @@ ALTER TABLE covering_index_heap ADD CONSTRAINT covering_pkey PRIMARY KEY USING I
covering_pkey;
DROP TABLE covering_index_heap;
--
--- Also try building functional, expressional, and partial indexes on
--- tables that already contain data.
---
-create index hash_f8_index_1 on hash_f8_heap(abs(random));
-create index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
-create index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
---
-- Try some concurrent index builds
--
-- Unfortunately this only tests about half the code paths because there are
@@ -1717,6 +1699,12 @@ create unique index on cwi_test (a);
alter table cwi_test add primary key using index cwi_test_a_idx ;
ERROR: ALTER TABLE / ADD CONSTRAINT USING INDEX is not supported on partitioned tables
DROP TABLE cwi_test;
+-- PRIMARY KEY constraint cannot be backed by a NULLS NOT DISTINCT index
+CREATE TABLE cwi_test(a int, b int);
+CREATE UNIQUE INDEX cwi_a_nnd ON cwi_test (a) NULLS NOT DISTINCT;
+ALTER TABLE cwi_test ADD PRIMARY KEY USING INDEX cwi_a_nnd;
+ERROR: primary keys cannot use NULLS NOT DISTINCT indexes
+DROP TABLE cwi_test;
--
-- Check handling of indexes on system columns
--
@@ -2024,7 +2012,6 @@ SELECT count(*) FROM dupindexcols
--
-- Check ordering of =ANY indexqual results (bug in 9.2.0)
--
-vacuum tenk1; -- ensure we get consistent plans here
explain (costs off)
SELECT unique1 FROM tenk1
WHERE unique1 IN (1,42,7)
@@ -2735,9 +2722,16 @@ REINDEX INDEX CONCURRENTLY pg_toast.pg_toast_1260_index; -- no catalog toast ind
ERROR: cannot reindex system catalogs concurrently
REINDEX SYSTEM CONCURRENTLY postgres; -- not allowed for SYSTEM
ERROR: cannot reindex system catalogs concurrently
+REINDEX (CONCURRENTLY) SYSTEM postgres; -- ditto
+ERROR: cannot reindex system catalogs concurrently
+REINDEX (CONCURRENTLY) SYSTEM; -- ditto
+ERROR: cannot reindex system catalogs concurrently
-- Warns about catalog relations
REINDEX SCHEMA CONCURRENTLY pg_catalog;
WARNING: cannot reindex system catalogs concurrently, skipping all
+-- Not the current database
+REINDEX DATABASE not_current_database;
+ERROR: can only reindex the currently open database
-- Check the relation status, there should not be invalid indexes
\d concur_reindex_tab
Table "public.concur_reindex_tab"
diff --git a/src/test/regress/expected/create_view_optimizer.out b/src/test/regress/expected/create_view_optimizer.out
index 2123c0150c1..59986b9a6e8 100755
--- a/src/test/regress/expected/create_view_optimizer.out
+++ b/src/test/regress/expected/create_view_optimizer.out
@@ -8,15 +8,38 @@ set Debug_print_plan=on;
-- Virtual class definitions
-- (this also tests the query rewrite system)
--
+-- directory paths and dlsuffix are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+\getenv libdir PG_LIBDIR
+\getenv dlsuffix PG_DLSUFFIX
+\set regresslib :libdir '/regress' :dlsuffix
+CREATE FUNCTION interpt_pp(path, path)
+ RETURNS point
+ AS :'regresslib'
+ LANGUAGE C STRICT;
+CREATE TABLE real_city (
+ pop int4,
+ cname text,
+ outline path
+);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'pop' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
+\set filename :abs_srcdir '/data/real_city.data'
+COPY real_city FROM :'filename';
+ANALYZE real_city;
+SELECT *
+ INTO TABLE ramp
+ FROM ONLY road
+ WHERE name ~ '.*Ramp';
CREATE VIEW street AS
SELECT r.name, r.thepath, c.cname AS cname
FROM ONLY road r, real_city c
- WHERE c.outline ## r.thepath;
+ WHERE c.outline ?# r.thepath;
CREATE VIEW iexit AS
SELECT ih.name, ih.thepath,
interpt_pp(ih.thepath, r.thepath) AS exit
FROM ihighway ih, ramp r
- WHERE ih.thepath ## r.thepath;
+ WHERE ih.thepath ?# r.thepath;
CREATE VIEW toyemp AS
SELECT name, age, location, 12*salary AS annualsal
FROM emp;
@@ -38,27 +61,29 @@ CREATE VIEW key_dependent_view_no_cols AS
--
-- CREATE OR REPLACE VIEW
--
-CREATE TABLE viewtest_tbl (a int, b int);
+CREATE TABLE viewtest_tbl (a int, b int, c numeric(10,1), d text COLLATE "C");
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
COPY viewtest_tbl FROM stdin;
CREATE OR REPLACE VIEW viewtest AS
SELECT * FROM viewtest_tbl;
CREATE OR REPLACE VIEW viewtest AS
SELECT * FROM viewtest_tbl WHERE a > 10;
SELECT * FROM viewtest;
- a | b
-----+----
- 15 | 20
- 20 | 25
+ a | b | c | d
+----+----+-----+-------
+ 15 | 20 | 3.3 | xyzz
+ 20 | 25 | 4.4 | xyzzy
(2 rows)
CREATE OR REPLACE VIEW viewtest AS
- SELECT a, b FROM viewtest_tbl WHERE a > 5 ORDER BY b DESC;
+ SELECT a, b, c, d FROM viewtest_tbl WHERE a > 5 ORDER BY b DESC;
SELECT * FROM viewtest;
- a | b
-----+----
- 20 | 25
- 15 | 20
- 10 | 15
+ a | b | c | d
+----+----+-----+-------
+ 20 | 25 | 4.4 | xyzzy
+ 15 | 20 | 3.3 | xyzz
+ 10 | 15 | 2.2 | xyz
(3 rows)
-- should fail
@@ -69,21 +94,36 @@ ERROR: cannot drop columns from view
CREATE OR REPLACE VIEW viewtest AS
SELECT 1, * FROM viewtest_tbl;
ERROR: cannot change name of view column "a" to "?column?"
+HINT: Use ALTER VIEW ... RENAME COLUMN ... to change name of view column instead.
-- should fail
CREATE OR REPLACE VIEW viewtest AS
- SELECT a, b::numeric FROM viewtest_tbl;
+ SELECT a, b::numeric, c, d FROM viewtest_tbl;
ERROR: cannot change data type of view column "b" from integer to numeric
+-- should fail
+CREATE OR REPLACE VIEW viewtest AS
+ SELECT a, b, c::numeric(10,2), d FROM viewtest_tbl;
+ERROR: cannot change data type of view column "c" from numeric(10,1) to numeric(10,2)
+-- should fail
+CREATE OR REPLACE VIEW viewtest AS
+ SELECT a, b, c, d COLLATE "POSIX" FROM viewtest_tbl;
+ERROR: cannot change collation of view column "d" from "C" to "POSIX"
-- should work
CREATE OR REPLACE VIEW viewtest AS
- SELECT a, b, 0 AS c FROM viewtest_tbl;
+ SELECT a, b, c, d, 0 AS e FROM viewtest_tbl;
DROP VIEW viewtest;
DROP TABLE viewtest_tbl;
-- tests for temporary views
CREATE SCHEMA temp_view_test
CREATE TABLE base_table (a int, id int)
CREATE TABLE base_table2 (a int, id int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
SET search_path TO temp_view_test, public;
CREATE TEMPORARY TABLE temp_table (a int, id int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-- should be created in temp_view_test schema
CREATE VIEW v1 AS SELECT * FROM base_table;
-- should be created in temp object schema
@@ -187,8 +227,14 @@ SELECT relname FROM pg_class
CREATE SCHEMA testviewschm2;
SET search_path TO testviewschm2, public;
CREATE TABLE t1 (num int, name text);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'num' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE t2 (num2 int, value text);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'num2' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TEMP TABLE tt (num2 int, value text);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'num2' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE VIEW nontemp1 AS SELECT * FROM t1 CROSS JOIN t2;
CREATE VIEW temporal1 AS SELECT * FROM t1 CROSS JOIN tt;
NOTICE: view "temporal1" will be a temporary view
@@ -226,10 +272,20 @@ SELECT relname FROM pg_class
(4 rows)
CREATE TABLE tbl1 ( a int, b int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE tbl2 (c int, d int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE tbl3 (e int, f int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'e' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE tbl4 (g int, h int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'g' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TEMP TABLE tmptbl (i int, j int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
--Should be in testviewschm2
CREATE VIEW pubview AS SELECT * FROM tbl1 WHERE tbl1.a
BETWEEN (SELECT d FROM tbl2 WHERE c = 1) AND (SELECT e FROM tbl3 WHERE f = 2)
@@ -271,17 +327,31 @@ ERROR: invalid value for boolean option "security_barrier": 100
CREATE VIEW mysecview6 WITH (invalid_option) -- Error
AS SELECT * FROM tbl1 WHERE a < 100;
ERROR: unrecognized parameter "invalid_option"
+CREATE VIEW mysecview7 WITH (security_invoker=true)
+ AS SELECT * FROM tbl1 WHERE a = 100;
+CREATE VIEW mysecview8 WITH (security_invoker=false, security_barrier=true)
+ AS SELECT * FROM tbl1 WHERE a > 100;
+CREATE VIEW mysecview9 WITH (security_invoker)
+ AS SELECT * FROM tbl1 WHERE a < 100;
+CREATE VIEW mysecview10 WITH (security_invoker=100) -- Error
+ AS SELECT * FROM tbl1 WHERE a <> 100;
+ERROR: invalid value for boolean option "security_invoker": 100
SELECT relname, relkind, reloptions FROM pg_class
WHERE oid in ('mysecview1'::regclass, 'mysecview2'::regclass,
- 'mysecview3'::regclass, 'mysecview4'::regclass)
+ 'mysecview3'::regclass, 'mysecview4'::regclass,
+ 'mysecview7'::regclass, 'mysecview8'::regclass,
+ 'mysecview9'::regclass)
ORDER BY relname;
- relname | relkind | reloptions
-------------+---------+--------------------------
+ relname | relkind | reloptions
+------------+---------+------------------------------------------------
mysecview1 | v |
mysecview2 | v | {security_barrier=true}
mysecview3 | v | {security_barrier=false}
mysecview4 | v | {security_barrier=true}
-(4 rows)
+ mysecview7 | v | {security_invoker=true}
+ mysecview8 | v | {security_invoker=false,security_barrier=true}
+ mysecview9 | v | {security_invoker=true}
+(7 rows)
CREATE OR REPLACE VIEW mysecview1
AS SELECT * FROM tbl1 WHERE a = 256;
@@ -291,17 +361,28 @@ CREATE OR REPLACE VIEW mysecview3 WITH (security_barrier=true)
AS SELECT * FROM tbl1 WHERE a < 256;
CREATE OR REPLACE VIEW mysecview4 WITH (security_barrier=false)
AS SELECT * FROM tbl1 WHERE a <> 256;
+CREATE OR REPLACE VIEW mysecview7
+ AS SELECT * FROM tbl1 WHERE a > 256;
+CREATE OR REPLACE VIEW mysecview8 WITH (security_invoker=true)
+ AS SELECT * FROM tbl1 WHERE a < 256;
+CREATE OR REPLACE VIEW mysecview9 WITH (security_invoker=false, security_barrier=true)
+ AS SELECT * FROM tbl1 WHERE a <> 256;
SELECT relname, relkind, reloptions FROM pg_class
WHERE oid in ('mysecview1'::regclass, 'mysecview2'::regclass,
- 'mysecview3'::regclass, 'mysecview4'::regclass)
+ 'mysecview3'::regclass, 'mysecview4'::regclass,
+ 'mysecview7'::regclass, 'mysecview8'::regclass,
+ 'mysecview9'::regclass)
ORDER BY relname;
- relname | relkind | reloptions
-------------+---------+--------------------------
+ relname | relkind | reloptions
+------------+---------+------------------------------------------------
mysecview1 | v |
mysecview2 | v |
mysecview3 | v | {security_barrier=true}
mysecview4 | v | {security_barrier=false}
-(4 rows)
+ mysecview7 | v |
+ mysecview8 | v | {security_invoker=true}
+ mysecview9 | v | {security_invoker=false,security_barrier=true}
+(7 rows)
-- Check that unknown literals are converted to "text" in CREATE VIEW,
-- so that we don't end up with unknown-type columns.
@@ -345,10 +426,10 @@ CREATE VIEW tt1 AS
c | numeric | | | | main |
d | character varying(4) | | | | extended |
View definition:
- SELECT vv.a,
- vv.b,
- vv.c,
- vv.d
+ SELECT a,
+ b,
+ c,
+ d
FROM ( VALUES ('abc'::character varying(3),'0123456789'::character varying,42,'abcd'::character varying(4)), ('0123456789'::character varying,'abc'::character varying(3),42.12,'abc'::character varying(4))) vv(a, b, c, d);
SELECT * FROM tt1;
@@ -368,8 +449,14 @@ SELECT a::varchar(3) FROM tt1;
DROP VIEW tt1;
-- Test view decompilation in the face of relation renaming conflicts
CREATE TABLE tt1 (f1 int, f2 int, f3 text);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'f1' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE tx1 (x1 int, x2 int, x3 text);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x1' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE temp_view_test.tt1 (y1 int, f2 int, f3 text);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'y1' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE VIEW aliased_view_1 AS
select * from tt1
where exists (select 1 from tx1 where tt1.f1 = tx1.x1);
@@ -390,9 +477,9 @@ CREATE VIEW aliased_view_4 AS
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tt1.f1,
- tt1.f2,
- tt1.f3
+ SELECT f1,
+ f2,
+ f3
FROM tt1
WHERE (EXISTS ( SELECT 1
FROM tx1
@@ -406,9 +493,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a1.f1,
- a1.f2,
- a1.f3
+ SELECT f1,
+ f2,
+ f3
FROM tt1 a1
WHERE (EXISTS ( SELECT 1
FROM tx1
@@ -422,9 +509,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tt1.f1,
- tt1.f2,
- tt1.f3
+ SELECT f1,
+ f2,
+ f3
FROM tt1
WHERE (EXISTS ( SELECT 1
FROM tx1 a2
@@ -438,9 +525,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tt1.y1,
- tt1.f2,
- tt1.f3
+ SELECT y1,
+ f2,
+ f3
FROM temp_view_test.tt1
WHERE (EXISTS ( SELECT 1
FROM tt1 tt1_1
@@ -455,9 +542,9 @@ ALTER TABLE tx1 RENAME TO a1;
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tt1.f1,
- tt1.f2,
- tt1.f3
+ SELECT f1,
+ f2,
+ f3
FROM tt1
WHERE (EXISTS ( SELECT 1
FROM a1
@@ -471,9 +558,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a1.f1,
- a1.f2,
- a1.f3
+ SELECT f1,
+ f2,
+ f3
FROM tt1 a1
WHERE (EXISTS ( SELECT 1
FROM a1 a1_1
@@ -487,9 +574,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tt1.f1,
- tt1.f2,
- tt1.f3
+ SELECT f1,
+ f2,
+ f3
FROM tt1
WHERE (EXISTS ( SELECT 1
FROM a1 a2
@@ -503,9 +590,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tt1.y1,
- tt1.f2,
- tt1.f3
+ SELECT y1,
+ f2,
+ f3
FROM temp_view_test.tt1
WHERE (EXISTS ( SELECT 1
FROM tt1 tt1_1
@@ -520,9 +607,9 @@ ALTER TABLE tt1 RENAME TO a2;
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a2.f1,
- a2.f2,
- a2.f3
+ SELECT f1,
+ f2,
+ f3
FROM a2
WHERE (EXISTS ( SELECT 1
FROM a1
@@ -536,9 +623,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a1.f1,
- a1.f2,
- a1.f3
+ SELECT f1,
+ f2,
+ f3
FROM a2 a1
WHERE (EXISTS ( SELECT 1
FROM a1 a1_1
@@ -552,9 +639,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a2.f1,
- a2.f2,
- a2.f3
+ SELECT f1,
+ f2,
+ f3
FROM a2
WHERE (EXISTS ( SELECT 1
FROM a1 a2_1
@@ -568,9 +655,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tt1.y1,
- tt1.f2,
- tt1.f3
+ SELECT y1,
+ f2,
+ f3
FROM temp_view_test.tt1
WHERE (EXISTS ( SELECT 1
FROM a2
@@ -585,9 +672,9 @@ ALTER TABLE a1 RENAME TO tt1;
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a2.f1,
- a2.f2,
- a2.f3
+ SELECT f1,
+ f2,
+ f3
FROM a2
WHERE (EXISTS ( SELECT 1
FROM tt1
@@ -601,9 +688,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a1.f1,
- a1.f2,
- a1.f3
+ SELECT f1,
+ f2,
+ f3
FROM a2 a1
WHERE (EXISTS ( SELECT 1
FROM tt1
@@ -617,9 +704,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a2.f1,
- a2.f2,
- a2.f3
+ SELECT f1,
+ f2,
+ f3
FROM a2
WHERE (EXISTS ( SELECT 1
FROM tt1 a2_1
@@ -633,9 +720,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tt1.y1,
- tt1.f2,
- tt1.f3
+ SELECT y1,
+ f2,
+ f3
FROM temp_view_test.tt1
WHERE (EXISTS ( SELECT 1
FROM a2
@@ -651,9 +738,9 @@ ALTER TABLE tx1 SET SCHEMA temp_view_test;
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tx1.f1,
- tx1.f2,
- tx1.f3
+ SELECT f1,
+ f2,
+ f3
FROM temp_view_test.tx1
WHERE (EXISTS ( SELECT 1
FROM tt1
@@ -667,9 +754,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a1.f1,
- a1.f2,
- a1.f3
+ SELECT f1,
+ f2,
+ f3
FROM temp_view_test.tx1 a1
WHERE (EXISTS ( SELECT 1
FROM tt1
@@ -683,9 +770,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tx1.f1,
- tx1.f2,
- tx1.f3
+ SELECT f1,
+ f2,
+ f3
FROM temp_view_test.tx1
WHERE (EXISTS ( SELECT 1
FROM tt1 a2
@@ -699,9 +786,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tt1.y1,
- tt1.f2,
- tt1.f3
+ SELECT y1,
+ f2,
+ f3
FROM temp_view_test.tt1
WHERE (EXISTS ( SELECT 1
FROM temp_view_test.tx1
@@ -718,9 +805,9 @@ ALTER TABLE tmp1 RENAME TO tx1;
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tx1.f1,
- tx1.f2,
- tx1.f3
+ SELECT f1,
+ f2,
+ f3
FROM temp_view_test.tx1
WHERE (EXISTS ( SELECT 1
FROM tt1
@@ -734,9 +821,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT a1.f1,
- a1.f2,
- a1.f3
+ SELECT f1,
+ f2,
+ f3
FROM temp_view_test.tx1 a1
WHERE (EXISTS ( SELECT 1
FROM tt1
@@ -750,9 +837,9 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tx1.f1,
- tx1.f2,
- tx1.f3
+ SELECT f1,
+ f2,
+ f3
FROM temp_view_test.tx1
WHERE (EXISTS ( SELECT 1
FROM tt1 a2
@@ -766,14 +853,62 @@ View definition:
f2 | integer | | | | plain |
f3 | text | | | | extended |
View definition:
- SELECT tx1.y1,
- tx1.f2,
- tx1.f3
+ SELECT y1,
+ f2,
+ f3
FROM tx1
WHERE (EXISTS ( SELECT 1
FROM temp_view_test.tx1 tx1_1
WHERE tx1.y1 = tx1_1.f1));
+-- Test correct deparsing of ORDER BY when there is an output name conflict
+create view aliased_order_by as
+select x1 as x2, x2 as x1, x3 from tt1
+ order by x2; -- this is interpreted per SQL92, so really ordering by x1
+\d+ aliased_order_by
+ View "testviewschm2.aliased_order_by"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+----------+-------------
+ x2 | integer | | | | plain |
+ x1 | integer | | | | plain |
+ x3 | text | | | | extended |
+View definition:
+ SELECT x1 AS x2,
+ x2 AS x1,
+ x3
+ FROM tt1
+ ORDER BY tt1.x1;
+
+alter view aliased_order_by rename column x1 to x0;
+\d+ aliased_order_by
+ View "testviewschm2.aliased_order_by"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+----------+-------------
+ x2 | integer | | | | plain |
+ x0 | integer | | | | plain |
+ x3 | text | | | | extended |
+View definition:
+ SELECT x1 AS x2,
+ x2 AS x0,
+ x3
+ FROM tt1
+ ORDER BY x1;
+
+alter view aliased_order_by rename column x3 to x1;
+\d+ aliased_order_by
+ View "testviewschm2.aliased_order_by"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+----------+-------------
+ x2 | integer | | | | plain |
+ x0 | integer | | | | plain |
+ x1 | text | | | | extended |
+View definition:
+ SELECT x1 AS x2,
+ x2 AS x0,
+ x3 AS x1
+ FROM tt1
+ ORDER BY tt1.x1;
+
-- Test aliasing of joins
create view view_of_joins as
select * from
@@ -810,6 +945,8 @@ View definition:
CROSS JOIN tbl4) same;
create table tbl1a (a int, c int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create view view_of_joins_2a as select * from tbl1 join tbl1a using (a);
create view view_of_joins_2b as select * from tbl1 join tbl1a using (a) as x;
create view view_of_joins_2c as select * from (tbl1 join tbl1a using (a)) as y;
@@ -856,8 +993,14 @@ select pg_get_viewdef('view_of_joins_2d', true);
-- Test view decompilation in the face of column addition/deletion/renaming
create table tt2 (a int, b int, c int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create table tt3 (ax int8, b int2, c numeric);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'ax' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create table tt4 (ay int, b int, q int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'ay' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create view v1 as select * from tt2 natural join tt3;
create view v1a as select * from (tt2 natural join tt3) j;
create view v2 as select * from tt2 join tt3 using (b,c) join tt4 using (b);
@@ -1190,7 +1333,11 @@ select pg_get_viewdef('v3', true);
(1 row)
create table tt5 (a int, b int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create table tt6 (c int, d int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create view vv1 as select * from (tt5 cross join tt6) j(aa,bb,cc,dd);
select pg_get_viewdef('vv1', true);
pg_get_viewdef
@@ -1255,17 +1402,21 @@ select pg_get_viewdef('v1', true);
select pg_get_viewdef('v4', true);
pg_get_viewdef
----------------
- SELECT v1.b, +
- v1.c, +
- v1.x AS a,+
- v1.ax +
+ SELECT b, +
+ c, +
+ x AS a, +
+ ax +
FROM v1;
(1 row)
-- Unnamed FULL JOIN USING is lots of fun too
create table tt7 (x int, xx int, y int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
alter table tt7 drop column xx;
create table tt8 (x int, z int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create view vv2 as
select * from (values(1,2,3,4,5)) v(a,b,c,d,e)
union all
@@ -1426,8 +1577,12 @@ select pg_get_viewdef('vv4', true);
-- Implicit coercions in a JOIN USING create issues similar to FULL JOIN
create table tt7a (x date, xx int, y int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
alter table tt7a drop column xx;
create table tt8a (x timestamptz, z int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create view vv2a as
select * from (values(now(),2,3,now(),5)) v(a,b,c,d,e)
union all
@@ -1456,7 +1611,11 @@ select pg_get_viewdef('vv2a', true);
-- Also check dropping a column that existed when the view was made
--
create table tt9 (x int, xx int, y int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create table tt10 (x int, z int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create view vv5 as select x,y,z from tt9 join tt10 using(x);
select pg_get_viewdef('vv5', true);
pg_get_viewdef
@@ -1484,8 +1643,14 @@ select pg_get_viewdef('vv5', true);
-- JOIN USING, and thereby make the USING column name ambiguous
--
create table tt11 (x int, y int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create table tt12 (x int, z int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create table tt13 (z int, q int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'z' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create view vv6 as select x,y,z,q from
(tt11 join tt12 using(x)) join tt13 using(z);
select pg_get_viewdef('vv6', true);
@@ -1517,6 +1682,8 @@ select pg_get_viewdef('vv6', true);
-- Check cases involving dropped/altered columns in a function's rowtype result
--
create table tt14t (f1 text, f2 text, f3 text, f4 text);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'f1' as the Apache Cloudberry data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
insert into tt14t values('foo', 'bar', 'baz', '42');
alter table tt14t drop column f2;
create function tt14f() returns setof tt14t as
@@ -1535,9 +1702,9 @@ create view tt14v as select t.* from tt14f() t;
select pg_get_viewdef('tt14v', true);
pg_get_viewdef
--------------------------------
- SELECT t.f1, +
- t.f3, +
- t.f4 +
+ SELECT f1, +
+ f3, +
+ f4 +
FROM tt14f() t(f1, f3, f4);
(1 row)
@@ -1547,53 +1714,105 @@ select * from tt14v;
foo | baz | 42
(1 row)
-begin;
--- this perhaps should be rejected, but it isn't:
-alter table tt14t drop column f3;
--- f3 is still in the view ...
+alter table tt14t drop column f3; -- fail, view has explicit reference to f3
+ERROR: cannot drop column f3 of table tt14t because other objects depend on it
+DETAIL: view tt14v depends on column f3 of table tt14t
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+-- MERGE16_FIXME: delete command can only delete tuples from master, But we
+-- need to delete them from both master and segments
+
+-- We used to have a bug that would allow the above to succeed, posing
+-- hazards for later execution of the view. Check that the internal
+-- defenses for those hazards haven't bit-rotted, in case some other
+-- bug with similar symptoms emerges.
+-- begin;
+--
+-- -- destroy the dependency entry that prevents the DROP:
+-- delete from pg_depend where
+-- objid = (select oid from pg_rewrite
+-- where ev_class = 'tt14v'::regclass and rulename = '_RETURN')
+-- and refobjsubid = 3
+-- returning pg_describe_object(classid, objid, objsubid) as obj,
+-- pg_describe_object(refclassid, refobjid, refobjsubid) as ref,
+-- deptype;
+--
+-- -- this will now succeed:
+-- alter table tt14t drop column f3;
+--
+-- -- column f3 is still in the view, sort of ...
+-- select pg_get_viewdef('tt14v', true);
+-- -- ... and you can even EXPLAIN it ...
+-- explain (verbose, costs off) select * from tt14v;
+-- -- but it will fail at execution
+-- select f1, f4 from tt14v;
+-- select * from tt14v;
+--
+-- rollback;
+-- likewise, altering a referenced column's type is prohibited ...
+alter table tt14t alter column f4 type integer using f4::integer; -- fail
+ERROR: cannot alter type of a column used by a view or rule
+DETAIL: rule _RETURN on view tt14v depends on column "f4"
+-- ... but some bug might let it happen, so check defenses
+-- begin;
+--
+-- -- destroy the dependency entry that prevents the ALTER:
+-- delete from pg_depend where
+-- objid = (select oid from pg_rewrite
+-- where ev_class = 'tt14v'::regclass and rulename = '_RETURN')
+-- and refobjsubid = 4
+-- returning pg_describe_object(classid, objid, objsubid) as obj,
+-- pg_describe_object(refclassid, refobjid, refobjsubid) as ref,
+-- deptype;
+--
+-- -- this will now succeed:
+-- alter table tt14t alter column f4 type integer using f4::integer;
+--
+-- -- f4 is still in the view ...
+-- select pg_get_viewdef('tt14v', true);
+-- -- but will fail at execution
+-- select f1, f3 from tt14v;
+-- select * from tt14v;
+--
+-- rollback;
+drop view tt14v;
+create view tt14v as select t.f1, t.f4 from tt14f() t;
select pg_get_viewdef('tt14v', true);
pg_get_viewdef
--------------------------------
- SELECT t.f1, +
- t.f3, +
- t.f4 +
+ SELECT f1, +
+ f4 +
FROM tt14f() t(f1, f3, f4);
(1 row)
--- but will fail at execution
-select f1, f4 from tt14v;
+select * from tt14v;
f1 | f4
-----+----
foo | 42
(1 row)
-select * from tt14v;
-ERROR: attribute 3 of type record has been dropped
-rollback;
-begin;
--- this perhaps should be rejected, but it isn't:
-alter table tt14t alter column f4 type integer using f4::integer;
--- f4 is still in the view ...
+alter table tt14t drop column f3; -- ok
select pg_get_viewdef('tt14v', true);
- pg_get_viewdef
---------------------------------
- SELECT t.f1, +
- t.f3, +
- t.f4 +
- FROM tt14f() t(f1, f3, f4);
+ pg_get_viewdef
+----------------------------
+ SELECT f1, +
+ f4 +
+ FROM tt14f() t(f1, f4);
(1 row)
--- but will fail at execution
-select f1, f3 from tt14v;
- f1 | f3
------+-----
- foo | baz
-(1 row)
+explain (verbose, costs off) select * from tt14v;
+ QUERY PLAN
+----------------------------------------
+ Function Scan on testviewschm2.tt14f t
+ Output: t.f1, t.f4
+ Function Call: tt14f()
+ Optimizer: Postgres query optimizer
+(4 rows)
select * from tt14v;
-ERROR: attribute 4 of type record has wrong type
-DETAIL: Table has type integer, but query expects text.
-rollback;
+ f1 | f4
+-----+----
+ foo | 42
+(1 row)
+
-- check display of whole-row variables in some corner cases
create type nestedcomposite as (x int8_tbl);
create view tt15v as select row(i)::nestedcomposite from int8_tbl i;
@@ -1602,16 +1821,16 @@ select * from tt15v;
------------------------------------------
("(123,456)")
("(123,4567890123456789)")
+ ("(4567890123456789,-4567890123456789)")
("(4567890123456789,123)")
("(4567890123456789,4567890123456789)")
- ("(4567890123456789,-4567890123456789)")
(5 rows)
select pg_get_viewdef('tt15v', true);
pg_get_viewdef
------------------------------------------------------
- SELECT ROW(i.*::int8_tbl)::nestedcomposite AS "row"+
+ SELECT ROW(i.*::int8_tbl)::nestedcomposite AS "row"+
 FROM int8_tbl i;
(1 row)
select row(i.*::int8_tbl)::nestedcomposite from int8_tbl i;
@@ -1619,9 +1838,9 @@ select row(i.*::int8_tbl)::nestedcomposite from int8_tbl i;
------------------------------------------
("(123,456)")
("(123,4567890123456789)")
+ ("(4567890123456789,-4567890123456789)")
("(4567890123456789,123)")
("(4567890123456789,4567890123456789)")
- ("(4567890123456789,-4567890123456789)")
(5 rows)
create view tt16v as select * from int8_tbl i, lateral(values(i)) ss;
@@ -1638,11 +1857,11 @@ select * from tt16v;
select pg_get_viewdef('tt16v', true);
pg_get_viewdef
-------------------------------------------
- SELECT i.q1, +
+ SELECT i.q1, +
 i.q2, +
 ss.column1 +
 FROM int8_tbl i, +
- LATERAL ( VALUES (i.*::int8_tbl)) ss;
+ LATERAL ( VALUES (i.*::int8_tbl)) ss;
(1 row)
select * from int8_tbl i, lateral(values(i.*::int8_tbl)) ss;
@@ -1669,10 +1888,10 @@ select * from tt17v;
select pg_get_viewdef('tt17v', true);
pg_get_viewdef
---------------------------------------------
- SELECT i.q1, +
- i.q2 +
+ SELECT q1, +
+ q2 +
 FROM int8_tbl i +
 WHERE (i.* IN ( VALUES (i.*::int8_tbl)));
(1 row)
select * from int8_tbl i where i.* in (values(i.*::int8_tbl));
@@ -1711,13 +1930,13 @@ NOTICE: identifier "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
select pg_get_viewdef('tt18v', true);
pg_get_viewdef
-----------------------------------------------------------------------------------
- SELECT xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q1, +
+ SELECT xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q1, +
 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q2 +
- FROM int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +
- UNION ALL +
- SELECT xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q1, +
+ FROM int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +
+ UNION ALL +
+ SELECT xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q1, +
 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q2 +
 FROM int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx;
(1 row)
explain (costs off) select * from tt18v;
@@ -1727,7 +1946,7 @@ explain (costs off) select * from tt18v;
-> Append
-> Seq Scan on int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-> Seq Scan on int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1
- Optimizer: Pivotal Optimizer (GPORCA) version 3.9.0
+ Optimizer: GPORCA
(5 rows)
-- check display of ScalarArrayOp with a sub-select
@@ -1787,6 +2006,7 @@ select pg_get_viewdef('tt20v', true);
-- reverse-listing of various special function syntaxes required by SQL
create view tt201v as
select
+ ('2022-12-01'::date + '1 day'::interval) at time zone 'UTC' as atz,
extract(day from now()) as extr,
(now(), '1 day'::interval) overlaps
(current_timestamp(2), '1 day'::interval) as o,
@@ -1805,14 +2025,47 @@ select
trim(trailing ' foo ') as rt,
trim(E'\\000'::bytea from E'\\000Tom\\000'::bytea) as btb,
trim(leading E'\\000'::bytea from E'\\000Tom\\000'::bytea) as ltb,
- trim(trailing E'\\000'::bytea from E'\\000Tom\\000'::bytea) as rtb;
+ trim(trailing E'\\000'::bytea from E'\\000Tom\\000'::bytea) as rtb,
+ CURRENT_DATE as cd,
+ (select * from CURRENT_DATE) as cd2,
+ CURRENT_TIME as ct,
+ (select * from CURRENT_TIME) as ct2,
+ CURRENT_TIME (1) as ct3,
+ (select * from CURRENT_TIME (1)) as ct4,
+ CURRENT_TIMESTAMP as ct5,
+ (select * from CURRENT_TIMESTAMP) as ct6,
+ CURRENT_TIMESTAMP (1) as ct7,
+ (select * from CURRENT_TIMESTAMP (1)) as ct8,
+ LOCALTIME as lt1,
+ (select * from LOCALTIME) as lt2,
+ LOCALTIME (1) as lt3,
+ (select * from LOCALTIME (1)) as lt4,
+ LOCALTIMESTAMP as lt5,
+ (select * from LOCALTIMESTAMP) as lt6,
+ LOCALTIMESTAMP (1) as lt7,
+ (select * from LOCALTIMESTAMP (1)) as lt8,
+ CURRENT_CATALOG as ca,
+ (select * from CURRENT_CATALOG) as ca2,
+ CURRENT_ROLE as cr,
+ (select * from CURRENT_ROLE) as cr2,
+ CURRENT_SCHEMA as cs,
+ (select * from CURRENT_SCHEMA) as cs2,
+ CURRENT_USER as cu,
+ (select * from CURRENT_USER) as cu2,
+ USER as us,
+ (select * from USER) as us2,
+ SESSION_USER seu,
+ (select * from SESSION_USER) as seu2,
+ SYSTEM_USER as su,
+ (select * from SYSTEM_USER) as su2;
select pg_get_viewdef('tt201v', true);
pg_get_viewdef
-----------------------------------------------------------------------------------------------
- SELECT EXTRACT(day FROM now()) AS extr, +
+ SELECT (('12-01-2022'::date + '@ 1 day'::interval) AT TIME ZONE 'UTC'::text) AS atz, +
+ EXTRACT(day FROM now()) AS extr, +
((now(), '@ 1 day'::interval) OVERLAPS (CURRENT_TIMESTAMP(2), '@ 1 day'::interval)) AS o,+
- (('foo'::text) IS NORMALIZED) AS isn, +
- (('foo'::text) IS NFKC NORMALIZED) AS isnn, +
+ ('foo'::text IS NORMALIZED) AS isn, +
+ ('foo'::text IS NFKC NORMALIZED) AS isnn, +
NORMALIZE('foo'::text) AS n, +
NORMALIZE('foo'::text, NFKD) AS nfkd, +
OVERLAY('foo'::text PLACING 'bar'::text FROM 2) AS ovl, +
@@ -1826,7 +2079,55 @@ select pg_get_viewdef('tt201v', true);
TRIM(TRAILING FROM ' foo '::text) AS rt, +
TRIM(BOTH '\x00'::bytea FROM '\x00546f6d00'::bytea) AS btb, +
TRIM(LEADING '\x00'::bytea FROM '\x00546f6d00'::bytea) AS ltb, +
- TRIM(TRAILING '\x00'::bytea FROM '\x00546f6d00'::bytea) AS rtb;
+ TRIM(TRAILING '\x00'::bytea FROM '\x00546f6d00'::bytea) AS rtb, +
+ CURRENT_DATE AS cd, +
+ ( SELECT "current_date"."current_date" +
+ FROM CURRENT_DATE "current_date"("current_date")) AS cd2, +
+ CURRENT_TIME AS ct, +
+ ( SELECT "current_time"."current_time" +
+ FROM CURRENT_TIME "current_time"("current_time")) AS ct2, +
+ CURRENT_TIME(1) AS ct3, +
+ ( SELECT "current_time"."current_time" +
+ FROM CURRENT_TIME(1) "current_time"("current_time")) AS ct4, +
+ CURRENT_TIMESTAMP AS ct5, +
+ ( SELECT "current_timestamp"."current_timestamp" +
+ FROM CURRENT_TIMESTAMP "current_timestamp"("current_timestamp")) AS ct6, +
+ CURRENT_TIMESTAMP(1) AS ct7, +
+ ( SELECT "current_timestamp"."current_timestamp" +
+ FROM CURRENT_TIMESTAMP(1) "current_timestamp"("current_timestamp")) AS ct8, +
+ LOCALTIME AS lt1, +
+ ( SELECT "localtime"."localtime" +
+ FROM LOCALTIME "localtime"("localtime")) AS lt2, +
+ LOCALTIME(1) AS lt3, +
+ ( SELECT "localtime"."localtime" +
+ FROM LOCALTIME(1) "localtime"("localtime")) AS lt4, +
+ LOCALTIMESTAMP AS lt5, +
+ ( SELECT "localtimestamp"."localtimestamp" +
+ FROM LOCALTIMESTAMP "localtimestamp"("localtimestamp")) AS lt6, +
+ LOCALTIMESTAMP(1) AS lt7, +
+ ( SELECT "localtimestamp"."localtimestamp" +
+ FROM LOCALTIMESTAMP(1) "localtimestamp"("localtimestamp")) AS lt8, +
+ CURRENT_CATALOG AS ca, +
+ ( SELECT "current_catalog"."current_catalog" +
+ FROM CURRENT_CATALOG "current_catalog"("current_catalog")) AS ca2, +
+ CURRENT_ROLE AS cr, +
+ ( SELECT "current_role"."current_role" +
+ FROM CURRENT_ROLE "current_role"("current_role")) AS cr2, +
+ CURRENT_SCHEMA AS cs, +
+ ( SELECT "current_schema"."current_schema" +
+ FROM CURRENT_SCHEMA "current_schema"("current_schema")) AS cs2, +
+ CURRENT_USER AS cu, +
+ ( SELECT "current_user"."current_user" +
+ FROM CURRENT_USER "current_user"("current_user")) AS cu2, +
+ USER AS us, +
+ ( SELECT "user"."user" +
+ FROM USER "user"("user")) AS us2, +
+ SESSION_USER AS seu, +
+ ( SELECT "session_user"."session_user" +
+ FROM SESSION_USER "session_user"("session_user")) AS seu2, +
+ SYSTEM_USER AS su, +
+ ( SELECT "system_user"."system_user" +
+ FROM SYSTEM_USER "system_user"("system_user")) AS su2;
(1 row)
-- corner cases with empty join conditions
@@ -1866,25 +2167,25 @@ select 42, 43;
select pg_get_viewdef('tt23v', true);
pg_get_viewdef
-------------------------------
- SELECT int8_tbl.q1 AS col_a,+
+ SELECT int8_tbl.q1 AS col_a,+
 int8_tbl.q2 AS col_b +
 FROM int8_tbl +
- UNION +
+ UNION +
 SELECT 42 AS col_a, +
- 43 AS col_b;
+ 43 AS col_b;
(1 row)
select pg_get_ruledef(oid, true) from pg_rewrite
where ev_class = 'tt23v'::regclass and ev_type = '1';
pg_get_ruledef
-----------------------------------------------------------------
- CREATE RULE "_RETURN" AS +
+ CREATE RULE "_RETURN" AS +
 ON SELECT TO tt23v DO INSTEAD SELECT int8_tbl.q1 AS col_a,+
 int8_tbl.q2 AS col_b +
 FROM int8_tbl +
- UNION +
+ UNION +
 SELECT 42 AS col_a, +
- 43 AS col_b;
+ 43 AS col_b;
(1 row)
-- test extraction of FieldSelect field names (get_name_for_var_field)
@@ -1917,26 +2218,26 @@ select pg_get_viewdef('tt25v', true);
WITH cte AS MATERIALIZED ( +
SELECT pg_get_keywords() AS k+
) +
- SELECT (cte.k).word AS word +
+ SELECT (k).word AS word +
FROM cte;
(1 row)
-- also check cases seen only in EXPLAIN
explain (verbose, costs off)
select * from tt24v;
- QUERY PLAN
---------------------------------------------------------------------------------------------------
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------
Hash Join
- Output: (share0_ref1.r).column2, ((ROW("*VALUES*".column1, "*VALUES*".column2))).column2
- Hash Cond: (((ROW("*VALUES*".column1, "*VALUES*".column2))).column1 = (share0_ref1.r).column1)
- -> Limit
- Output: (ROW("*VALUES*".column1, "*VALUES*".column2))
+ Output: (share0_ref1.r).column2, ((ROW("*VALUES*_1".column1, "*VALUES*_1".column2))).column2
+ Hash Cond: ((share0_ref1.r).column1 = ((ROW("*VALUES*_1".column1, "*VALUES*_1".column2))).column1)
+ -> Shared Scan (share slice:id 0:0)
+ Output: share0_ref1.r
-> Values Scan on "*VALUES*"
Output: ROW("*VALUES*".column1, "*VALUES*".column2)
-> Hash
- Output: share0_ref1.r
- -> Shared Scan (share slice:id 0:0)
- Output: share0_ref1.r
+ Output: (ROW("*VALUES*_1".column1, "*VALUES*_1".column2))
+ -> Limit
+ Output: (ROW("*VALUES*_1".column1, "*VALUES*_1".column2))
-> Values Scan on "*VALUES*_1"
Output: ROW("*VALUES*_1".column1, "*VALUES*_1".column2)
Optimizer: Postgres query optimizer
@@ -1970,38 +2271,39 @@ select x + y + z as c1,
(x,y) <= ANY (values(1,2),(3,4)) as c11
from (values(1,2,3)) v(x,y,z);
select pg_get_viewdef('tt26v', true);
- pg_get_viewdef
---------------------------------------------------------
- SELECT v.x + v.y + v.z AS c1, +
- v.x * v.y + v.z AS c2, +
- v.x + v.y * v.z AS c3, +
- (v.x + v.y) * v.z AS c4, +
- v.x * (v.y + v.z) AS c5, +
- v.x + (v.y + v.z) AS c6, +
- v.x + (v.y # v.z) AS c7, +
- v.x > v.y AND (v.y > v.z OR v.x > v.z) AS c8, +
- v.x > v.y OR (v.y > v.z AND NOT v.x > v.z) AS c9, +
- ((v.x, v.y) <> ALL ( VALUES (1,2), (3,4))) AS c10,+
- ((v.x, v.y) <= ANY ( VALUES (1,2), (3,4))) AS c11 +
+ pg_get_viewdef
+----------------------------------------------------
+ SELECT x + y + z AS c1, +
+ x * y + z AS c2, +
+ x + y * z AS c3, +
+ (x + y) * z AS c4, +
+ x * (y + z) AS c5, +
+ x + (y + z) AS c6, +
+ x + (y # z) AS c7, +
+ x > y AND (y > z OR x > z) AS c8, +
+ x > y OR (y > z AND NOT x > z) AS c9, +
+ ((x, y) <> ALL ( VALUES (1,2), (3,4))) AS c10,+
+ ((x, y) <= ANY ( VALUES (1,2), (3,4))) AS c11 +
FROM ( VALUES (1,2,3)) v(x, y, z);
(1 row)
--- test display negative operator of const-folder expression
-create table tdis(a int, b int, c int);
+-- test restriction on non-system view expansion.
+create table tt27v_tbl (a int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-create view tdis_v1 as select a,b,c, -1::int from tdis group by 1,2,3,4;
-select pg_get_viewdef('tdis_v1', true);
- pg_get_viewdef
------------------------------------------------------
- SELECT tdis.a, +
- tdis.b, +
- tdis.c, +
- - 1 AS "?column?" +
- FROM tdis +
- GROUP BY tdis.a, tdis.b, tdis.c, ('-1'::integer);
+create view tt27v as select a from tt27v_tbl;
+set restrict_nonsystem_relation_kind to 'view';
+select a from tt27v where a > 0; -- Error
+ERROR: access to non-system view "tt27v" is restricted
+insert into tt27v values (1); -- Error
+ERROR: access to non-system view "tt27v" is restricted
+select viewname from pg_views where viewname = 'tt27v'; -- Ok to access a system view.
+ viewname
+----------
+ tt27v
(1 row)
+reset restrict_nonsystem_relation_kind;
-- clean up all the random objects we made above
DROP SCHEMA temp_view_test CASCADE;
NOTICE: drop cascades to 27 other objects
@@ -2033,7 +2335,7 @@ drop cascades to view aliased_view_2
drop cascades to view aliased_view_3
drop cascades to view aliased_view_4
DROP SCHEMA testviewschm2 CASCADE;
-NOTICE: drop cascades to 76 other objects
+NOTICE: drop cascades to 80 other objects
DETAIL: drop cascades to table t1
drop cascades to view temporal1
drop cascades to view temporal2
@@ -2054,9 +2356,13 @@ drop cascades to view mysecview1
drop cascades to view mysecview2
drop cascades to view mysecview3
drop cascades to view mysecview4
+drop cascades to view mysecview7
+drop cascades to view mysecview8
+drop cascades to view mysecview9
drop cascades to view unspecified_types
drop cascades to table tt1
drop cascades to table tx1
+drop cascades to view aliased_order_by
drop cascades to view view_of_joins
drop cascades to table tbl1a
drop cascades to view view_of_joins_2a
@@ -2108,5 +2414,5 @@ drop cascades to view tt23v
drop cascades to view tt24v
drop cascades to view tt25v
drop cascades to view tt26v
-drop cascades to table tdis
-drop cascades to view tdis_v1
+drop cascades to table tt27v_tbl
+drop cascades to view tt27v
diff --git a/src/test/regress/expected/directory_table_optimizer.out b/src/test/regress/expected/directory_table_optimizer.out
index 4a9adbba58f..28c72120098 100644
--- a/src/test/regress/expected/directory_table_optimizer.out
+++ b/src/test/regress/expected/directory_table_optimizer.out
@@ -73,8 +73,10 @@ SELECT relname, relisshared, relpersistence, relkind FROM pg_class WHERE relname
gp_storage_user_mapping | t | p | r
(1 row)
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set testtablespace :abs_builddir '/testtablespace'
-- CREATE TABLESPACE
-CREATE TABLESPACE directory_tblspc LOCATION '@testtablespace@';
+CREATE TABLESPACE directory_tblspc LOCATION :'testtablespace';
-- CREATE DATABASE
CREATE DATABASE dirtable_db;
\c dirtable_db
@@ -771,62 +773,64 @@ SELECT relative_path, size, tag FROM dir_table2 ORDER BY 1;
---------------+------+-----
(0 rows)
-\COPY dir_table1 FROM '@abs_srcdir@/data/nation.csv'; -- fail
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set nation_file :abs_srcdir '/data/nation.csv'
+COPY dir_table1 FROM :'nation_file'; -- fail
ERROR: Copy from directory table file name can't be null.
-\COPY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation'; -- fail
+COPY dir_table1 FROM :'nation_file' 'nation'; -- fail
ERROR: Only support copy binary from directory table.
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv'; -- fail
+COPY BINARY dir_table1 FROM :'nation_file'; -- fail
ERROR: Copy from directory table file name can't be null.
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation1';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation1';
NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=31193)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -- fail
+COPY BINARY dir_table1 FROM :'nation_file' 'nation1'; -- fail
ERROR: duplicate key value violates unique constraint "dir_table1_pkey"
DETAIL: Key (relative_path)=(nation1) already exists.
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation2' 'nation2'; -- fail
+COPY BINARY dir_table1 FROM :'nation_file' 'nation2' 'nation2'; -- fail
ERROR: syntax error at or near "'nation2'"
-LINE 1: COPY BINARY dir_table1 FROM STDIN 'nation2' 'nation2'; -- fa...
- ^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation2';
+LINE 1: ...berry/src/test/regress/data/nation.csv' 'nation2' 'nation2';
+ ^
+COPY BINARY dir_table1 FROM :'nation_file' 'nation2';
NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=31192)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation3' WITH TAG 'nation';
NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=31193)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail
+COPY BINARY dir_table1 FROM :'nation_file' 'nation3' WITH TAG 'nation'; -- fail
ERROR: duplicate key value violates unique constraint "dir_table1_pkey"
DETAIL: Key (relative_path)=(nation3) already exists.
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2'; -- fail
+COPY BINARY dir_table1 FROM :'nation_file' 'nation3' WITH TAG 'nation2'; -- fail
ERROR: duplicate key value violates unique constraint "dir_table1_pkey"
DETAIL: Key (relative_path)=(nation3) already exists.
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation4' WITH TAG 'nation';
NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=31192)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail
+COPY BINARY dir_table1 FROM :'nation_file' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail
ERROR: syntax error at or near "WITH"
-LINE 1: ...dir_table1 FROM STDIN 'nation5' WITH TAG 'nation' WITH TAG '...
+LINE 1: ...ress/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG '...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation6';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation6';
NOTICE: dir_table1 INSERT AFTER ROW (seg0 127.0.1.1:7002 pid=2727)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation7';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation7';
NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=2728)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation8';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation8';
NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=2728)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation9';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation9';
NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=2728)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation10';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation10';
NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=2729)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation11';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation11';
NOTICE: dir_table1 INSERT AFTER ROW (seg0 127.0.1.1:7002 pid=2727)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation12';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation12';
NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=2728)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation13';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation13';
NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=2728)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation14';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation14';
NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=2729)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation15';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation15';
NOTICE: dir_table1 INSERT AFTER ROW (seg0 127.0.1.1:7002 pid=2727)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation16';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation16';
NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=2728)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation17';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation17';
NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=2728)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation18';
+COPY BINARY dir_table1 FROM :'nation_file' 'nation18';
NOTICE: dir_table1 INSERT AFTER ROW (seg0 127.0.1.1:7002 pid=2727)
SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1;
relative_path | size | tag
@@ -872,24 +876,25 @@ SELECT relative_path, content FROM directory_table('dir_table1') ORDER BY 1;
nation9 | \x307c414c47455249417c307c20686167676c652e206361726566756c6c792066696e616c206465706f736974732064657465637420736c796c7920616761690a317c415247454e54494e417c317c616c20666f7865732070726f6d69736520736c796c79206163636f7264696e6720746f2074686520726567756c6172206163636f756e74732e20626f6c6420726571756573747320616c6f6e0a327c4252415a494c7c317c7920616c6f6e6773696465206f66207468652070656e64696e67206465706f736974732e206361726566756c6c79207370656369616c207061636b61676573206172652061626f7574207468652069726f6e696320666f726765732e20736c796c79207370656369616c200a337c43414e4144417c317c6561732068616e672069726f6e69632c2073696c656e74207061636b616765732e20736c796c7920726567756c6172207061636b616765732061726520667572696f75736c79206f76657220746865207469746865732e20666c756666696c7920626f6c640a347c45475950547c347c792061626f766520746865206361726566756c6c7920756e757375616c207468656f646f6c697465732e2066696e616c206475676f7574732061726520717569636b6c79206163726f73732074686520667572696f75736c7920726567756c617220640a357c455448494f5049417c307c76656e207061636b616765732077616b6520717569636b6c792e20726567750a367c4652414e43457c337c726566756c6c792066696e616c2072657175657374732e20726567756c61722c2069726f6e690a377c4745524d414e597c337c6c20706c6174656c6574732e20726567756c6172206163636f756e747320782d7261793a20756e757375616c2c20726567756c6172206163636f0a387c494e4449417c327c737320657863757365732063616a6f6c6520736c796c79206163726f737320746865207061636b616765732e206465706f73697473207072696e742061726f756e0a397c494e444f4e455349417c327c20736c796c792065787072657373206173796d70746f7465732e20726567756c6172206465706f7369747320686167676c6520736c796c792e206361726566756c6c792069726f6e696320686f636b657920706c617965727320736c65657020626c697468656c792e206361726566756c6c0a31307c4952414e7c347c6566756c6c7920616c6f6e6773696465206f662074686520736c796c792066696e616c20646570656e64656e636965732e200a31317c495241517c347c6e6963206465706f7369747320626f6f73742061746f702074686520717569636b6c792066696e616c2072657175657374733f20717569
636b6c7920726567756c610a31327c4a4150414e7c327c6f75736c792e2066696e616c2c20657870726573732067696674732063616a6f6c6520610a31337c4a4f5244414e7c347c6963206465706f736974732061726520626c697468656c792061626f757420746865206361726566756c6c7920726567756c61722070610a31347c4b454e59417c307c2070656e64696e67206578637573657320686167676c6520667572696f75736c79206465706f736974732e2070656e64696e672c20657870726573732070696e746f206265616e732077616b6520666c756666696c79207061737420740a31357c4d4f524f43434f7c307c726e732e20626c697468656c7920626f6c6420636f7572747320616d6f6e672074686520636c6f73656c7920726567756c6172207061636b616765732075736520667572696f75736c7920626f6c6420706c6174656c6574733f0a31367c4d4f5a414d42495155457c307c732e2069726f6e69632c20756e757375616c206173796d70746f7465732077616b6520626c697468656c7920720a31377c504552557c317c706c6174656c6574732e20626c697468656c792070656e64696e6720646570656e64656e636965732075736520666c756666696c79206163726f737320746865206576656e2070696e746f206265616e732e206361726566756c6c792073696c656e74206163636f756e0a31387c4348494e417c327c6320646570656e64656e636965732e20667572696f75736c792065787072657373206e6f746f726e697320736c65657020736c796c7920726567756c6172206163636f756e74732e20696465617320736c6565702e206465706f730a31397c524f4d414e49417c337c756c6172206173796d70746f746573206172652061626f75742074686520667572696f7573206d756c7469706c696572732e206578707265737320646570656e64656e63696573206e61672061626f7665207468652069726f6e6963616c6c792069726f6e6963206163636f756e740a32307c5341554449204152414249417c347c74732e2073696c656e7420726571756573747320686167676c652e20636c6f73656c792065787072657373207061636b6167657320736c656570206163726f73732074686520626c697468656c790a32317c564945544e414d7c327c68656c7920656e746963696e676c792065787072657373206163636f756e74732e206576656e2c2066696e616c200a32327c5255535349417c337c20726571756573747320616761696e73742074686520706c6174656c65747320757365206e65766572206163636f7264696e6720746f2074686520717569636b6c7920726567756c61722070696e740a32337c554e4954
4544204b494e47444f4d7c337c65616e7320626f6f7374206361726566756c6c79207370656369616c2072657175657374732e206163636f756e7473206172652e206361726566756c6c0a32347c554e49544544205354415445537c317c792066696e616c207061636b616765732e20736c6f7720666f7865732063616a6f6c6520717569636b6c792e20717569636b6c792073696c656e7420706c6174656c657473206272656163682069726f6e6963206163636f756e74732e20756e757375616c2070696e746f2062650a
(17 rows)
-COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv'; -- fail
+\set cat_nation_file 'cat ' :abs_srcdir '/data/nation.csv'
+COPY dir_table2 FROM PROGRAM :'cat_nation_file'; -- fail
ERROR: Copy from directory table file name can't be null.
-COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation'; -- fail
+COPY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation'; -- fail
ERROR: Only support copy binary from directory table.
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1';
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1'; -- fail
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation1';
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation1'; -- fail
ERROR: duplicate key value violates unique constraint "dir_table2_pkey"
DETAIL: Key (relative_path)=(nation1) already exists.
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation2';
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation';
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation2';
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation3' WITH TAG 'nation';
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation3' WITH TAG 'nation'; -- fail
ERROR: duplicate key value violates unique constraint "dir_table2_pkey"
DETAIL: Key (relative_path)=(nation3) already exists.
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2'; -- fail
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation3' WITH TAG 'nation2'; -- fail
ERROR: duplicate key value violates unique constraint "dir_table2_pkey"
DETAIL: Key (relative_path)=(nation3) already exists.
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation';
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation4' WITH TAG 'nation';
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail
ERROR: syntax error at or near "WITH"
LINE 1: ...ress/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG '...
^
@@ -911,130 +916,130 @@ SELECT relative_path, content FROM directory_table('dir_table2') ORDER BY 1;
nation4 | \x307c414c47455249417c307c20686167676c652e206361726566756c6c792066696e616c206465706f736974732064657465637420736c796c7920616761690a317c415247454e54494e417c317c616c20666f7865732070726f6d69736520736c796c79206163636f7264696e6720746f2074686520726567756c6172206163636f756e74732e20626f6c6420726571756573747320616c6f6e0a327c4252415a494c7c317c7920616c6f6e6773696465206f66207468652070656e64696e67206465706f736974732e206361726566756c6c79207370656369616c207061636b61676573206172652061626f7574207468652069726f6e696320666f726765732e20736c796c79207370656369616c200a337c43414e4144417c317c6561732068616e672069726f6e69632c2073696c656e74207061636b616765732e20736c796c7920726567756c6172207061636b616765732061726520667572696f75736c79206f76657220746865207469746865732e20666c756666696c7920626f6c640a347c45475950547c347c792061626f766520746865206361726566756c6c7920756e757375616c207468656f646f6c697465732e2066696e616c206475676f7574732061726520717569636b6c79206163726f73732074686520667572696f75736c7920726567756c617220640a357c455448494f5049417c307c76656e207061636b616765732077616b6520717569636b6c792e20726567750a367c4652414e43457c337c726566756c6c792066696e616c2072657175657374732e20726567756c61722c2069726f6e690a377c4745524d414e597c337c6c20706c6174656c6574732e20726567756c6172206163636f756e747320782d7261793a20756e757375616c2c20726567756c6172206163636f0a387c494e4449417c327c737320657863757365732063616a6f6c6520736c796c79206163726f737320746865207061636b616765732e206465706f73697473207072696e742061726f756e0a397c494e444f4e455349417c327c20736c796c792065787072657373206173796d70746f7465732e20726567756c6172206465706f7369747320686167676c6520736c796c792e206361726566756c6c792069726f6e696320686f636b657920706c617965727320736c65657020626c697468656c792e206361726566756c6c0a31307c4952414e7c347c6566756c6c7920616c6f6e6773696465206f662074686520736c796c792066696e616c20646570656e64656e636965732e200a31317c495241517c347c6e6963206465706f7369747320626f6f73742061746f702074686520717569636b6c792066696e616c2072657175657374733f20717569
636b6c7920726567756c610a31327c4a4150414e7c327c6f75736c792e2066696e616c2c20657870726573732067696674732063616a6f6c6520610a31337c4a4f5244414e7c347c6963206465706f736974732061726520626c697468656c792061626f757420746865206361726566756c6c7920726567756c61722070610a31347c4b454e59417c307c2070656e64696e67206578637573657320686167676c6520667572696f75736c79206465706f736974732e2070656e64696e672c20657870726573732070696e746f206265616e732077616b6520666c756666696c79207061737420740a31357c4d4f524f43434f7c307c726e732e20626c697468656c7920626f6c6420636f7572747320616d6f6e672074686520636c6f73656c7920726567756c6172207061636b616765732075736520667572696f75736c7920626f6c6420706c6174656c6574733f0a31367c4d4f5a414d42495155457c307c732e2069726f6e69632c20756e757375616c206173796d70746f7465732077616b6520626c697468656c7920720a31377c504552557c317c706c6174656c6574732e20626c697468656c792070656e64696e6720646570656e64656e636965732075736520666c756666696c79206163726f737320746865206576656e2070696e746f206265616e732e206361726566756c6c792073696c656e74206163636f756e0a31387c4348494e417c327c6320646570656e64656e636965732e20667572696f75736c792065787072657373206e6f746f726e697320736c65657020736c796c7920726567756c6172206163636f756e74732e20696465617320736c6565702e206465706f730a31397c524f4d414e49417c337c756c6172206173796d70746f746573206172652061626f75742074686520667572696f7573206d756c7469706c696572732e206578707265737320646570656e64656e63696573206e61672061626f7665207468652069726f6e6963616c6c792069726f6e6963206163636f756e740a32307c5341554449204152414249417c347c74732e2073696c656e7420726571756573747320686167676c652e20636c6f73656c792065787072657373207061636b6167657320736c656570206163726f73732074686520626c697468656c790a32317c564945544e414d7c327c68656c7920656e746963696e676c792065787072657373206163636f756e74732e206576656e2c2066696e616c200a32327c5255535349417c337c20726571756573747320616761696e73742074686520706c6174656c65747320757365206e65766572206163636f7264696e6720746f2074686520717569636b6c7920726567756c61722070696e740a32337c554e4954
4544204b494e47444f4d7c337c65616e7320626f6f7374206361726566756c6c79207370656369616c2072657175657374732e206163636f756e7473206172652e206361726566756c6c0a32347c554e49544544205354415445537c317c792066696e616c207061636b616765732e20736c6f7720666f7865732063616a6f6c6520717569636b6c792e20717569636b6c792073696c656e7420706c6174656c657473206272656163682069726f6e6963206163636f756e74732e20756e757375616c2070696e746f2062650a
(4 rows)
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation.txt'; -- OK
+COPY BINARY dir_table1 FROM :'nation_file' 'nation.txt'; -- OK
NOTICE: dir_table1 INSERT AFTER ROW
-COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation2.txt'; -- OK
+COPY BINARY dir_table1 FROM :'nation_file' 'nation2.txt'; -- OK
NOTICE: dir_table1 INSERT AFTER ROW
-\COPY BINARY "abs.dir_table" FROM '@abs_srcdir@/data/nation.csv' 'aa.bb'; -- OK
-COPY BINARY "abs.dir_table" FROM '@abs_srcdir@/data/nation.csv' 'cc.dd'; -- OK
+COPY BINARY "abs.dir_table" FROM :'nation_file' 'aa.bb'; -- OK
+COPY BINARY "abs.dir_table" FROM :'nation_file' 'cc.dd'; -- OK
-- Test copy binary from directory table
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (format CSV);
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (format CSV);
ERROR: conflicting or redundant options
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (format CSV...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (format CSV...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (freeze off);
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (freeze off);
ERROR: option "freeze" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (freeze off...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (freeze off...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (freeze on);
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (freeze on);
ERROR: option "freeze" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (freeze on)...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (freeze on)...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (delimiter ',');
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (delimiter ',');
ERROR: option "delimiter" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (delimiter ...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (delimiter ...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (null ' ');
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (null ' ');
ERROR: option "null" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (null ' ');
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (null ' ');
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (header off);
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (header off);
ERROR: option "header" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (header off...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (header off...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (header on);
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (header on);
ERROR: option "header" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (header on)...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (header on)...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (quote ':');
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (quote ':');
ERROR: option "quote" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (quote ':')...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (quote ':')...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (escape ':');
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (escape ':');
ERROR: option "escape" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (escape ':'...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (escape ':'...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_quote (a));
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (force_quote (a));
ERROR: option "force_quote" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (force_quot...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (force_quot...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_quote *);
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (force_quote *);
ERROR: option "force_quote" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (force_quot...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (force_quot...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_not_null (a));
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (force_not_null (a));
ERROR: option "force_not_null" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (force_not_...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (force_not_...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_null (a));
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (force_null (a));
ERROR: option "force_null" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (force_null...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (force_null...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (convert_selectively (a));
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (convert_selectively (a));
ERROR: option "convert_selectively" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (convert_se...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (convert_se...
^
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (encoding 'sql_ascii');
+COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (encoding 'sql_ascii');
ERROR: option "encoding" not recognized
-LINE 1: ...OPY BINARY dir_table1 FROM STDIN 'nation_failed' (encoding '...
+LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (encoding '...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (format CSV);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (format CSV);
ERROR: conflicting or redundant options
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (format CSV...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (freeze off);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (freeze off);
ERROR: option "freeze" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (freeze off...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (freeze on);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (freeze on);
ERROR: option "freeze" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (freeze on)...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (delimiter ',');
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (delimiter ',');
ERROR: option "delimiter" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (delimiter ...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (null ' ');
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (null ' ');
ERROR: option "null" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (null ' ');
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (header off);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (header off);
ERROR: option "header" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (header off...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (header on);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (header on);
ERROR: option "header" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (header on)...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (quote ':');
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (quote ':');
ERROR: option "quote" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (quote ':')...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (escape ':');
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (escape ':');
ERROR: option "escape" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (escape ':'...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_quote (a));
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (force_quote (a));
ERROR: option "force_quote" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (force_quot...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_quote *);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (force_quote *);
ERROR: option "force_quote" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (force_quot...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_not_null (a));
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (force_not_null (a));
ERROR: option "force_not_null" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (force_not_...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_null (a));
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (force_null (a));
ERROR: option "force_null" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (force_null...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (convert_selectively (a));
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (convert_selectively (a));
ERROR: option "convert_selectively" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (convert_se...
^
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (encoding 'sql_ascii');
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (encoding 'sql_ascii');
ERROR: option "encoding" not recognized
LINE 1: ...rc/test/regress/data/nation.csv' 'nation_failed' (encoding '...
^
@@ -1094,31 +1099,36 @@ SELECT md5_equal('dir_table2', 'nation4');
(1 row)
-- Test Copy To directory table
-\COPY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail
+\set dir_table1_file :abs_srcdir '/data/dir_table1'
+\set dir_table12_file :abs_srcdir '/data/dir_table2'
+\set dir_nation1_file :abs_srcdir '/data/dir_nation1'
+\set dir_unknown_file :abs_srcdir '/data/dir_unknown'
+\set nation2_gz 'gzip -c -1 > ' :abs_srcdir '/data/nation2.gz'
+COPY dir_table1 TO :'dir_table1_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-\COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail
+COPY BINARY dir_table1 TO :'dir_table1_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-COPY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail
+COPY dir_table1 TO :'dir_table1_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail
+COPY BINARY dir_table1 TO :'dir_table1_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-\COPY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail
+COPY dir_table2 TO :'dir_table12_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-\COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail
+COPY BINARY dir_table2 TO :'dir_table12_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-COPY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail
+COPY dir_table2 TO :'dir_table12_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail
+COPY BINARY dir_table2 TO :'dir_table12_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-\COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail
+COPY BINARY dir_table1 TO :'dir_table1_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail
+COPY BINARY dir_table1 TO :'dir_table1_file'; -- fail
ERROR: COPY to directory table must specify the relative_path name.
-\COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1'; -- OK
-COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1'; -- OK
-\COPY BINARY DIRECTORY TABLE dir_table1 'unknown' TO '@abs_srcdir@/data/unknown'; -- OK
-COPY BINARY DIRECTORY TABLE dir_table1 'unknown' TO '@abs_srcdir@/data/unknown'; -- OK
-\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO :'dir_nation1_file'; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO :'dir_nation1_file'; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'unknown' TO :'dir_unknown_file'; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'unknown' TO :'dir_unknown_file'; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK
0|ALGERIA|0| haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -1170,7 +1180,7 @@ COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK
22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint
23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull
24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be
-\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK
0|ALGERIA|0| haggle. carefully final deposits detect slyly agai
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
@@ -1222,14 +1232,20 @@ COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK
22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint
23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull
24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be
-\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz'; -- OK
-COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz'; -- OK
-\COPY BINARY DIRECTORY TABLE "abs.dir_table" 'aa.bb' TO '@abs_srcdir@/data/aa.bb'; -- OK
-COPY BINARY DIRECTORY TABLE "abs.dir_table" 'cc.dd' TO '@abs_srcdir@/data/cc.dd'; -- OK
-\COPY BINARY DIRECTORY TABLE dir_table1 'nation.txt' TO '@abs_srcdir@/data/nation.txt'; -- OK
-COPY BINARY DIRECTORY TABLE dir_table1 'nation2.txt' TO '@abs_srcdir@/data/nation2.txt'; -- OK
-\COPY BINARY DIRECTORY TABLE public.dir_table1 'nation.txt' TO '@abs_srcdir@/data/nation3.txt'; -- OK
-COPY BINARY DIRECTORY TABLE public.dir_table1 'nation2.txt' TO '@abs_srcdir@/data/nation4.txt'; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM :'nation2_gz'; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM :'nation2_gz'; -- OK
+\set aa_bb_file :abs_srcdir '/data/aa.bb'
+\set cc_dd_file :abs_srcdir '/data/cc.dd'
+\set nation_txt_file :abs_srcdir '/data/nation.txt'
+\set nation2_txt_file :abs_srcdir '/data/nation2.txt'
+\set nation3_txt_file :abs_srcdir '/data/nation3.txt'
+\set nation4_txt_file :abs_srcdir '/data/nation4.txt'
+COPY BINARY DIRECTORY TABLE "abs.dir_table" 'aa.bb' TO :'aa_bb_file'; -- OK
+COPY BINARY DIRECTORY TABLE "abs.dir_table" 'cc.dd' TO :'cc_dd_file'; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation.txt' TO :'nation_txt_file'; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation2.txt' TO :'nation2_txt_file'; -- OK
+COPY BINARY DIRECTORY TABLE public.dir_table1 'nation.txt' TO :'nation3_txt_file'; -- OK
+COPY BINARY DIRECTORY TABLE public.dir_table1 'nation2.txt' TO :'nation4_txt_file'; -- OK
SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1;
relative_path | size | tag
---------------+------+--------
@@ -1548,13 +1564,15 @@ SELECT relative_path, size, tag FROM dir_table2 ORDER BY 1;
-- Test alter table directory schema table
ALTER TABLE dir_table1 ADD COLUMN a int; -- fail
-ERROR: "dir_table1" is not a table, composite type, or foreign table
+DETAIL: This operation is not supported for partitioned indexes.
+ERROR: ALTER action ADD COLUMN cannot be performed on relation "dir_table1"
ALTER DIRECTORY TABLE dir_table1 ADD COLUMN a int; -- fail
ERROR: syntax error at or near "ADD"
LINE 1: ALTER DIRECTORY TABLE dir_table1 ADD COLUMN a int;
^
ALTER TABLE dir_table2 DROP COLUMN relative_path; -- fail
-ERROR: "dir_table2" is not a table, composite type, or foreign table
+DETAIL: This operation is not supported for partitioned indexes.
+ERROR: ALTER action DROP COLUMN cannot be performed on relation "dir_table2"
ALTER DIRECTORY TABLE dir_table2 DROP COLUMN relative_path; -- fail
ERROR: syntax error at or near "DROP"
LINE 1: ALTER DIRECTORY TABLE dir_table2 DROP COLUMN relative_path;
@@ -1566,7 +1584,8 @@ ERROR: syntax error at or near "RENAME"
LINE 1: ALTER DIRECTORY TABLE dir_table1 RENAME TO dir_table_new;
^
ALTER TABLE dir_table2 ADD CONSTRAINT dirtable_constraint UNIQUE (tag); -- fail
-ERROR: "dir_table2" is not a table or foreign table
+DETAIL: This operation is not supported for partitioned indexes.
+ERROR: ALTER action ADD CONSTRAINT cannot be performed on relation "dir_table2"
ALTER DIRECTORY TABLE dir_table2 ADD CONSTRAINT dirtable_constraint UNIQUE (tag); -- fail
ERROR: syntax error at or near "ADD"
LINE 1: ALTER DIRECTORY TABLE dir_table2 ADD CONSTRAINT dirtable_con...
@@ -1654,8 +1673,8 @@ SELECT relative_path, size, tag FROM dir_table2 ORDER BY 1;
-- Test transaction commit of directory table manipulation
CREATE DIRECTORY TABLE dir_table4 TABLESPACE directory_tblspc;
BEGIN;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_commit';
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_commit2' WITH TAG 'nation';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_commit';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_commit2' WITH TAG 'nation';
COMMIT;
SELECT relative_path, content FROM directory_table('dir_table4') ORDER BY 1;
relative_path | content
@@ -1701,7 +1720,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
-- Test transaction rollback of directory table manipulation
BEGIN;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_rollback';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_rollback';
SELECT relative_path, content FROM directory_table('dir_table4') ORDER BY 1;
relative_path | content
-----------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -1742,7 +1761,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
nation_commit2 | nation_updated
(1 row)
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_rollback2' WITH TAG 'nation';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_rollback2' WITH TAG 'nation';
UPDATE dir_table4 SET tag = 'nation_updated' WHERE relative_path = 'nation_rollback2';
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
@@ -1766,7 +1785,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
nation_commit2 | nation_updated
(1 row)
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit' WITH TAG 'nation';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit' WITH TAG 'nation';
SAVEPOINT s1;
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
@@ -1775,7 +1794,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
nation_subcommit | nation
(2 rows)
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit2';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit2';
SAVEPOINT s2;
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
@@ -1785,7 +1804,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
nation_subcommit2 |
(3 rows)
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit3';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit3';
RELEASE SAVEPOINT s1;
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
@@ -1831,7 +1850,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
nation_subcommit3 |
(3 rows)
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit';
SAVEPOINT s2;
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
@@ -1923,7 +1942,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
nation_subcommit3 |
(2 rows)
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit4';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit4';
SAVEPOINT s2;
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
@@ -1936,8 +1955,8 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
ROLLBACK TO SAVEPOINT s1;
COMMIT;
-- Test subtransaction rollback of directory table manipulation
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback1';
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback2';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback1';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback2';
BEGIN;
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
@@ -1948,7 +1967,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
nation_subrollback2 |
(4 rows)
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback3';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback3';
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
---------------------+----------------
@@ -1978,7 +1997,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
ROLLBACK;
BEGIN;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback4';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback4';
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
---------------------+----------------
@@ -2015,7 +2034,7 @@ SELECT remove_file('dir_table4', 'nation_subrollback2');
t
(1 row)
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback5';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback5';
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
---------------------+----------------
@@ -2050,7 +2069,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
nation_subrollback5 |
(4 rows)
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback6';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback6';
SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
relative_path | tag
---------------------+----------------
@@ -2136,5 +2155,5 @@ DROP TRIGGER IF EXISTS trigtest_b_stmt_tg_dirtable_1 ON dir_table1;
NOTICE: relation "dir_table1" does not exist, skipping
DROP TRIGGER IF EXISTS trigtest_a_stmt_tg_dirtable_1 ON dir_table1;
NOTICE: relation "dir_table1" does not exist, skipping
-\!rm -rf @testtablespace@;
+\!rm -rf $PG_ABS_BUILDDIR/testtablespace;
DROP TABLESPACE directory_tblspc;
diff --git a/src/test/regress/expected/domain_optimizer.out b/src/test/regress/expected/domain_optimizer.out
index 4994aee8be9..3057e0b6ed3 100755
--- a/src/test/regress/expected/domain_optimizer.out
+++ b/src/test/regress/expected/domain_optimizer.out
@@ -87,6 +87,56 @@ drop domain domainvarchar restrict;
drop domain domainnumeric restrict;
drop domain domainint4 restrict;
drop domain domaintext;
+-- Test non-error-throwing input
+create domain positiveint int4 check(value > 0);
+create domain weirdfloat float8 check((1 / value) < 10);
+select pg_input_is_valid('1', 'positiveint');
+ pg_input_is_valid
+-------------------
+ t
+(1 row)
+
+select pg_input_is_valid('junk', 'positiveint');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+select pg_input_is_valid('-1', 'positiveint');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+select * from pg_input_error_info('junk', 'positiveint');
+ message | detail | hint | sql_error_code
+-----------------------------------------------+--------+------+----------------
+ invalid input syntax for type integer: "junk" | | | 22P02
+(1 row)
+
+select * from pg_input_error_info('-1', 'positiveint');
+ message | detail | hint | sql_error_code
+----------------------------------------------------------------------------+--------+------+----------------
+ value for domain positiveint violates check constraint "positiveint_check" | | | 23514
+(1 row)
+
+select * from pg_input_error_info('junk', 'weirdfloat');
+ message | detail | hint | sql_error_code
+--------------------------------------------------------+--------+------+----------------
+ invalid input syntax for type double precision: "junk" | | | 22P02
+(1 row)
+
+select * from pg_input_error_info('0.01', 'weirdfloat');
+ message | detail | hint | sql_error_code
+--------------------------------------------------------------------------+--------+------+----------------
+ value for domain weirdfloat violates check constraint "weirdfloat_check" | | | 23514
+(1 row)
+
+-- We currently can't trap errors raised in the CHECK expression itself
+select * from pg_input_error_info('0', 'weirdfloat');
+ERROR: division by zero
+drop domain positiveint;
+drop domain weirdfloat;
-- Test domains over array types
create domain domainint4arr int4[1];
create domain domainchar4arr varchar(4)[2][3];
@@ -426,7 +476,7 @@ explain (verbose, costs off)
Update on public.dcomptable
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
Output: (((d1[1].r := (d1[1].r - '1'::double precision))[1].i := (d1[1].i + '1'::double precision))::dcomptypea), ctid, gp_segment_id, (DMLAction)
- -> Split
+ -> Split Update
Output: (((d1[1].r := (d1[1].r - '1'::double precision))[1].i := (d1[1].i + '1'::double precision))::dcomptypea), ctid, gp_segment_id, DMLAction
-> Seq Scan on public.dcomptable
Output: (d1[1].r := (d1[1].r - '1'::double precision))[1].i := (d1[1].i + '1'::double precision), ctid, gp_segment_id
diff --git a/src/test/regress/expected/explain_optimizer.out b/src/test/regress/expected/explain_optimizer.out
index f0b1cf78650..a7224a466ef 100644
--- a/src/test/regress/expected/explain_optimizer.out
+++ b/src/test/regress/expected/explain_optimizer.out
@@ -51,6 +51,12 @@ begin
reset enable_parallel;
end;
$$;
+-- Disable JIT, or we'll get different output on machines where that's been
+-- forced on
+set jit = off;
+-- Similarly, disable track_io_timing, to avoid output differences when
+-- enabled.
+set track_io_timing = off;
-- Simple cases
select explain_filter('explain select * from int8_tbl i8');
explain_filter
@@ -102,111 +108,6 @@ select explain_filter('explain (analyze, buffers, format text) select * from int
Execution Time: N.N ms
(8 rows)
-select explain_filter('explain (analyze, buffers, format json) select * from int8_tbl i8');
- explain_filter
--------------------------------------------
- [ +
- { +
- "Plan": { +
- "Node Type": "Gather Motion", +
- "Senders": N, +
- "Receivers": N, +
- "Slice": N, +
- "Segments": N, +
- "Gang Type": "primary reader", +
- "Parallel Aware": false, +
- "Async Capable": false, +
- "Startup Cost": N.N, +
- "Total Cost": N.N, +
- "Plan Rows": N, +
- "Plan Width": N, +
- "Actual Startup Time": N.N, +
- "Actual Total Time": N.N, +
- "Actual Rows": N, +
- "Actual Loops": N, +
- "Shared Hit Blocks": N, +
- "Shared Read Blocks": N, +
- "Shared Dirtied Blocks": N, +
- "Shared Written Blocks": N, +
- "Local Hit Blocks": N, +
- "Local Read Blocks": N, +
- "Local Dirtied Blocks": N, +
- "Local Written Blocks": N, +
- "Temp Read Blocks": N, +
- "Temp Written Blocks": N, +
- "Plans": [ +
- { +
- "Node Type": "Seq Scan", +
- "Parent Relationship": "Outer",+
- "Slice": N, +
- "Segments": N, +
- "Gang Type": "primary reader", +
- "Parallel Aware": false, +
- "Async Capable": false, +
- "Relation Name": "int8_tbl", +
- "Alias": "int8_tbl", +
- "Startup Cost": N.N, +
- "Total Cost": N.N, +
- "Plan Rows": N, +
- "Plan Width": N, +
- "Actual Startup Time": N.N, +
- "Actual Total Time": N.N, +
- "Actual Rows": N, +
- "Actual Loops": N, +
- "Shared Hit Blocks": N, +
- "Shared Read Blocks": N, +
- "Shared Dirtied Blocks": N, +
- "Shared Written Blocks": N, +
- "Local Hit Blocks": N, +
- "Local Read Blocks": N, +
- "Local Dirtied Blocks": N, +
- "Local Written Blocks": N, +
- "Temp Read Blocks": N, +
- "Temp Written Blocks": N +
- } +
- ] +
- }, +
- "Settings": { +
- "Optimizer": "GPORCA" +
- }, +
- "Planning": { +
- "Shared Hit Blocks": N, +
- "Shared Read Blocks": N, +
- "Shared Dirtied Blocks": N, +
- "Shared Written Blocks": N, +
- "Local Hit Blocks": N, +
- "Local Read Blocks": N, +
- "Local Dirtied Blocks": N, +
- "Local Written Blocks": N, +
- "Temp Read Blocks": N, +
- "Temp Written Blocks": N +
- }, +
- "Planning Time": N.N, +
- "Triggers": [ +
- ], +
- "Slice statistics": [ +
- { +
- "Slice": N, +
- "Executor Memory": N +
- }, +
- { +
- "Slice": N, +
- "Executor Memory": { +
- "Average": N, +
- "Workers": N, +
- "Subworkers": N, +
- "Maximum Memory Used": N +
- } +
- } +
- ], +
- "Statement statistics": { +
- "Memory used": N +
- }, +
- "Execution Time": N.N +
- } +
- ]
-(1 row)
-
select explain_filter('explain (analyze, buffers, format xml) select * from int8_tbl i8');
explain_filter
------------------------------------------------------------
@@ -481,6 +382,115 @@ select explain_filter('explain (buffers, format json) select * from int8_tbl i8'
]
(1 row)
+-- Check output including I/O timings. These fields are conditional
+-- but always set in JSON format, so check them only in this case.
+set track_io_timing = on;
+select explain_filter('explain (analyze, buffers, format json) select * from int8_tbl i8');
+ explain_filter
+-------------------------------------------
+ [ +
+ { +
+ "Plan": { +
+ "Node Type": "Gather Motion", +
+ "Senders": N, +
+ "Receivers": N, +
+ "Slice": N, +
+ "Segments": N, +
+ "Gang Type": "primary reader", +
+ "Parallel Aware": false, +
+ "Async Capable": false, +
+ "Startup Cost": N.N, +
+ "Total Cost": N.N, +
+ "Plan Rows": N, +
+ "Plan Width": N, +
+ "Actual Startup Time": N.N, +
+ "Actual Total Time": N.N, +
+ "Actual Rows": N, +
+ "Actual Loops": N, +
+ "Shared Hit Blocks": N, +
+ "Shared Read Blocks": N, +
+ "Shared Dirtied Blocks": N, +
+ "Shared Written Blocks": N, +
+ "Local Hit Blocks": N, +
+ "Local Read Blocks": N, +
+ "Local Dirtied Blocks": N, +
+ "Local Written Blocks": N, +
+ "Temp Read Blocks": N, +
+ "Temp Written Blocks": N, +
+ "Plans": [ +
+ { +
+ "Node Type": "Seq Scan", +
+ "Parent Relationship": "Outer",+
+ "Slice": N, +
+ "Segments": N, +
+ "Gang Type": "primary reader", +
+ "Parallel Aware": false, +
+ "Async Capable": false, +
+ "Relation Name": "int8_tbl", +
+ "Alias": "int8_tbl", +
+ "Startup Cost": N.N, +
+ "Total Cost": N.N, +
+ "Plan Rows": N, +
+ "Plan Width": N, +
+ "Actual Startup Time": N.N, +
+ "Actual Total Time": N.N, +
+ "Actual Rows": N, +
+ "Actual Loops": N, +
+ "Shared Hit Blocks": N, +
+ "Shared Read Blocks": N, +
+ "Shared Dirtied Blocks": N, +
+ "Shared Written Blocks": N, +
+ "Local Hit Blocks": N, +
+ "Local Read Blocks": N, +
+ "Local Dirtied Blocks": N, +
+ "Local Written Blocks": N, +
+ "Temp Read Blocks": N, +
+ "Temp Written Blocks": N +
+ } +
+ ] +
+ }, +
+ "Settings": { +
+ "Optimizer": "GPORCA" +
+ }, +
+ "Planning": { +
+ "Shared Hit Blocks": N, +
+ "Shared Read Blocks": N, +
+ "Shared Dirtied Blocks": N, +
+ "Shared Written Blocks": N, +
+ "Local Hit Blocks": N, +
+ "Local Read Blocks": N, +
+ "Local Dirtied Blocks": N, +
+ "Local Written Blocks": N, +
+ "Temp Read Blocks": N, +
+ "Temp Written Blocks": N +
+ }, +
+ "Planning Time": N.N, +
+ "Triggers": [ +
+ ], +
+ "Slice statistics": [ +
+ { +
+ "Slice": N, +
+ "Executor Memory": N +
+ }, +
+ { +
+ "Slice": N, +
+ "Executor Memory": { +
+ "Average": N, +
+ "Workers": N, +
+ "Subworkers": N, +
+ "Maximum Memory Used": N +
+ } +
+ } +
+ ], +
+ "Statement statistics": { +
+ "Memory used": N +
+ }, +
+ "Execution Time": N.N +
+ } +
+ ]
+(1 row)
+
+set track_io_timing = off;
-- SETTINGS option
-- We have to ignore other settings that might be imposed by the environment,
-- so printing the whole Settings field unfortunately won't do.
@@ -501,6 +511,49 @@ select explain_filter_to_json('explain (settings, format json) select * from int
(1 row)
rollback;
+-- GENERIC_PLAN option
+select explain_filter('explain (generic_plan) select unique1 from tenk1 where thousand = $1');
+ explain_filter
+--------------------------------------------------------------------------------------
+ Gather Motion N:N (slice1; segments: N) (cost=N.N..N.N rows=N width=N)
+ -> Index Scan using tenk1_thous_tenthous on tenk1 (cost=N.N..N.N rows=N width=N)
+ Index Cond: (thousand = $1)
+ Optimizer: GPORCA
+(4 rows)
+
+-- should fail
+select explain_filter('explain (analyze, generic_plan) select unique1 from tenk1 where thousand = $1');
+ERROR: EXPLAIN options ANALYZE and GENERIC_PLAN cannot be used together
+-- Test EXPLAIN (GENERIC_PLAN) with partition pruning
+-- partitions should be pruned at plan time, based on constants,
+-- but there should be no pruning based on parameter placeholders
+create table gen_part (
+ key1 integer not null,
+ key2 integer not null
+) partition by list (key1);
+create table gen_part_1
+ partition of gen_part for values in (1)
+ partition by range (key2);
+create table gen_part_1_1
+ partition of gen_part_1 for values from (1) to (2);
+create table gen_part_1_2
+ partition of gen_part_1 for values from (2) to (3);
+create table gen_part_2
+ partition of gen_part for values in (2);
+-- should scan gen_part_1_1 and gen_part_1_2, but not gen_part_2
+select explain_filter('explain (generic_plan) select key1, key2 from gen_part where key1 = 1 and key2 = $1');
+ explain_filter
+---------------------------------------------------------------------------------
+ Gather Motion N:N (slice1; segments: N) (cost=N.N..N.N rows=N width=N)
+ -> Append (cost=N.N..N.N rows=N width=N)
+ -> Seq Scan on gen_part_1_1 gen_part_1 (cost=N.N..N.N rows=N width=N)
+ Filter: ((key1 = N) AND (key2 = $1))
+ -> Seq Scan on gen_part_1_2 gen_part_2 (cost=N.N..N.N rows=N width=N)
+ Filter: ((key1 = N) AND (key2 = $1))
+ Optimizer: GPORCA
+(7 rows)
+
+drop table gen_part;
--
-- Test production of per-worker data
--
@@ -605,13 +658,6 @@ select jsonb_pretty(
"Sort Key": [ +
"tenk1.tenthous" +
], +
- "work_mem": { +
- "Used": 0, +
- "Segments": 0, +
- "Max Memory": 0, +
- "Workfile Spilling": 0, +
- "Max Memory Segment": 0 +
- }, +
"Gang Type": "primary reader", +
"Node Type": "Sort", +
"Plan Rows": 0, +
@@ -702,6 +748,7 @@ select jsonb_pretty(
"Shared Written Blocks": 0 +
}, +
"Settings": { +
+ "jit": "off", +
"Optimizer": "GPORCA", +
"optimizer": "on", +
"enable_parallel": "off", +
@@ -725,8 +772,7 @@ select jsonb_pretty(
"Workers": 0, +
"Subworkers": 0, +
"Maximum Memory Used": 0 +
- }, +
- "Work Maximum Memory": 0 +
+ } +
} +
], +
"Statement statistics": { +
@@ -737,6 +783,22 @@ select jsonb_pretty(
(1 row)
rollback;
+-- Test display of temporary objects
+create temp table t1(f1 float8);
+create function pg_temp.mysin(float8) returns float8 language plpgsql
+as 'begin return sin($1); end';
+select explain_filter('explain (verbose) select * from t1 where pg_temp.mysin(f1) < 0.5');
+ explain_filter
+--------------------------------------------------------------------------
+ Gather Motion N:N (slice1; segments: N) (cost=N.N..N.N rows=N width=N)
+ Output: f1
+ -> Seq Scan on pg_temp.t1 (cost=N.N..N.N rows=N width=N)
+ Output: f1
+ Filter: (pg_temp.mysin(t1.f1) < 'N.N'::double precision)
+ Optimizer: GPORCA
+(6 rows)
+
+-- Test compute_query_id
set compute_query_id = on;
select explain_filter('explain (verbose) select * from int8_tbl i8');
explain_filter
diff --git a/src/test/regress/expected/expressions_optimizer.out b/src/test/regress/expected/expressions_optimizer.out
index e3f2496a432..7f94c7afb6b 100644
--- a/src/test/regress/expected/expressions_optimizer.out
+++ b/src/test/regress/expected/expressions_optimizer.out
@@ -2,7 +2,7 @@
-- expression evaluation tests that don't fit into a more specific file
--
--
--- Tests for SQLVAlueFunction
+-- Tests for SQLValueFunction
--
-- current_date (always matches because of transactional behaviour)
SELECT date(now())::text = current_date::text;
@@ -36,7 +36,7 @@ SELECT now()::time(3)::text = localtime(3)::text;
t
(1 row)
--- current_timestamp / localtimestamp (always matches because of transactional behaviour)
+-- current_time[stamp]/ localtime[stamp] (always matches because of transactional behaviour)
SELECT current_timestamp = NOW();
?column?
----------
@@ -57,7 +57,36 @@ SELECT now()::timestamp::text = localtimestamp::text;
t
(1 row)
--- current_role/user/user is tested in rolnames.sql
+-- precision overflow
+SELECT current_time = current_time(7);
+WARNING: TIME(7) WITH TIME ZONE precision reduced to maximum allowed, 6
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT current_timestamp = current_timestamp(7);
+WARNING: TIMESTAMP(7) WITH TIME ZONE precision reduced to maximum allowed, 6
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT localtime = localtime(7);
+WARNING: TIME(7) precision reduced to maximum allowed, 6
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT localtimestamp = localtimestamp(7);
+WARNING: TIMESTAMP(7) precision reduced to maximum allowed, 6
+ ?column?
+----------
+ t
+(1 row)
+
+-- current_role/user/user is tested in rolenames.sql
-- current database / catalog
SELECT current_catalog = current_database();
?column?
@@ -88,90 +117,112 @@ SELECT current_schema;
RESET search_path;
--
--- Tests for BETWEEN
+-- Test parsing of a no-op cast to a type with unspecified typmod
--
--- start_ignore
--- GPDB_13_MERGE_FIXME:
--- ORCA does support 2 phase aggregate, but not clear why it doesn't generate the plan
--- that makes use of 2 phase aggregate. However, the plan is correct.
--- NOTE: we should consider remove this optimizer answer file after we fix this issue.
--- end_ignore
-explain (costs off)
-select count(*) from date_tbl
- where f1 between '1997-01-01' and '1998-01-01';
- QUERY PLAN
------------------------------------------------------------------------------------
- Aggregate
- -> Gather Motion 3:1 (slice1; segments: 3)
- -> Seq Scan on date_tbl
- Filter: ((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date))
- Optimizer: Pivotal Optimizer (GPORCA)
-(5 rows)
-
-select count(*) from date_tbl
- where f1 between '1997-01-01' and '1998-01-01';
- count
--------
- 3
-(1 row)
+begin;
+create table numeric_tbl (f1 numeric(18,3), f2 numeric);
+create view numeric_view as
+ select
+ f1, f1::numeric(16,4) as f1164, f1::numeric as f1n,
+ f2, f2::numeric(16,4) as f2164, f2::numeric as f2n
+ from numeric_tbl;
+\d+ numeric_view
+ View "public.numeric_view"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------------+-----------+----------+---------+---------+-------------
+ f1 | numeric(18,3) | | | | main |
+ f1164 | numeric(16,4) | | | | main |
+ f1n | numeric | | | | main |
+ f2 | numeric | | | | main |
+ f2164 | numeric(16,4) | | | | main |
+ f2n | numeric | | | | main |
+View definition:
+ SELECT f1,
+ f1::numeric(16,4) AS f1164,
+ f1::numeric AS f1n,
+ f2,
+ f2::numeric(16,4) AS f2164,
+ f2 AS f2n
+ FROM numeric_tbl;
-explain (costs off)
-select count(*) from date_tbl
- where f1 not between '1997-01-01' and '1998-01-01';
- QUERY PLAN
---------------------------------------------------------------------------------
- Aggregate
- -> Gather Motion 3:1 (slice1; segments: 3)
- -> Seq Scan on date_tbl
- Filter: ((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date))
- Optimizer: Pivotal Optimizer (GPORCA)
+explain (verbose, costs off) select * from numeric_view;
+ QUERY PLAN
+-------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Output: f1, ((f1)::numeric(16,4)), ((f1)::numeric), f2, ((f2)::numeric(16,4)), f2
+ -> Seq Scan on public.numeric_tbl
+ Output: f1, (f1)::numeric(16,4), f1, f2, (f2)::numeric(16,4), f2
(5 rows)
-select count(*) from date_tbl
- where f1 not between '1997-01-01' and '1998-01-01';
- count
--------
- 13
-(1 row)
-
-explain (costs off)
-select count(*) from date_tbl
- where f1 between symmetric '1997-01-01' and '1998-01-01';
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------
- Aggregate
- -> Gather Motion 3:1 (slice1; segments: 3)
- -> Seq Scan on date_tbl
- Filter: (((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date)) OR ((f1 >= '01-01-1998'::date) AND (f1 <= '01-01-1997'::date)))
- Optimizer: Pivotal Optimizer (GPORCA)
-(5 rows)
+-- bpchar, lacking planner support for its length coercion function,
+-- could behave differently
+create table bpchar_tbl (f1 character(16) unique, f2 bpchar);
+create view bpchar_view as
+ select
+ f1, f1::character(14) as f114, f1::bpchar as f1n,
+ f2, f2::character(14) as f214, f2::bpchar as f2n
+ from bpchar_tbl;
+\d+ bpchar_view
+ View "public.bpchar_view"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------------+-----------+----------+---------+----------+-------------
+ f1 | character(16) | | | | extended |
+ f114 | character(14) | | | | extended |
+ f1n | bpchar | | | | extended |
+ f2 | bpchar | | | | extended |
+ f214 | character(14) | | | | extended |
+ f2n | bpchar | | | | extended |
+View definition:
+ SELECT f1,
+ f1::character(14) AS f114,
+ f1::bpchar AS f1n,
+ f2,
+ f2::character(14) AS f214,
+ f2 AS f2n
+ FROM bpchar_tbl;
-select count(*) from date_tbl
- where f1 between symmetric '1997-01-01' and '1998-01-01';
- count
--------
- 3
-(1 row)
+explain (verbose, costs off) select * from bpchar_view
+ where f1::bpchar = 'foo';
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1)
+ Output: f1, ((f1)::character(14)), ((f1)::bpchar), f2, ((f2)::character(14)), f2
+ -> Index Scan using bpchar_tbl_f1_key on public.bpchar_tbl
+ Output: f1, (f1)::character(14), f1, f2, (f2)::character(14), f2
+ Index Cond: ((bpchar_tbl.f1)::bpchar = 'foo'::bpchar)
+(6 rows)
-explain (costs off)
-select count(*) from date_tbl
- where f1 not between symmetric '1997-01-01' and '1998-01-01';
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------
- Aggregate
- -> Gather Motion 3:1 (slice1; segments: 3)
- -> Seq Scan on date_tbl
- Filter: (((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date)) AND ((f1 < '01-01-1998'::date) OR (f1 > '01-01-1997'::date)))
- Optimizer: Pivotal Optimizer (GPORCA)
-(5 rows)
+rollback;
+--
+-- Ordinarily, IN/NOT IN can be converted to a ScalarArrayOpExpr
+-- with a suitably-chosen array type.
+--
+explain (verbose, costs off)
+select random() IN (1, 4, 8.0);
+ QUERY PLAN
+------------------------------------------------------------
+ Result
+ Output: (random() = ANY ('{1,4,8}'::double precision[]))
+(3 rows)
-select count(*) from date_tbl
- where f1 not between symmetric '1997-01-01' and '1998-01-01';
- count
--------
- 13
-(1 row)
+explain (verbose, costs off)
+select random()::int IN (1, 4, 8.0);
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Result
+ Output: (((random())::integer)::numeric = ANY ('{1,4,8.0}'::numeric[]))
+(3 rows)
+-- However, if there's not a common supertype for the IN elements,
+-- we should instead try to produce "x = v1 OR x = v2 OR ...".
+-- In most cases that'll fail for lack of all the requisite = operators,
+-- but it can succeed sometimes. So this should complain about lack of
+-- an = operator, not about cast failure.
+select '(0,0)'::point in ('(0,0,0,0)'::box, point(0,0));
+ERROR: operator does not exist: point = box
+LINE 1: select '(0,0)'::point in ('(0,0,0,0)'::box, point(0,0));
+ ^
+HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
--
-- Test parsing of a no-op cast to a type with unspecified typmod
--
@@ -193,12 +244,12 @@ create view numeric_view as
f2164 | numeric(16,4) | | | | main |
f2n | numeric | | | | main |
View definition:
- SELECT numeric_tbl.f1,
- numeric_tbl.f1::numeric(16,4) AS f1164,
- numeric_tbl.f1::numeric AS f1n,
- numeric_tbl.f2,
- numeric_tbl.f2::numeric(16,4) AS f2164,
- numeric_tbl.f2 AS f2n
+ SELECT f1,
+ f1::numeric(16,4) AS f1164,
+ f1::numeric AS f1n,
+ f2,
+ f2::numeric(16,4) AS f2164,
+ f2 AS f2n
FROM numeric_tbl;
explain (verbose, costs off) select * from numeric_view;
@@ -208,7 +259,6 @@ explain (verbose, costs off) select * from numeric_view;
Output: f1, ((f1)::numeric(16,4)), ((f1)::numeric), f2, ((f2)::numeric(16,4)), f2
-> Seq Scan on public.numeric_tbl
Output: f1, (f1)::numeric(16,4), f1, f2, (f2)::numeric(16,4), f2
- Optimizer: Pivotal Optimizer (GPORCA)
(5 rows)
-- bpchar, lacking planner support for its length coercion function,
@@ -230,12 +280,12 @@ create view bpchar_view as
f214 | character(14) | | | | extended |
f2n | bpchar | | | | extended |
View definition:
- SELECT bpchar_tbl.f1,
- bpchar_tbl.f1::character(14) AS f114,
- bpchar_tbl.f1::bpchar AS f1n,
- bpchar_tbl.f2,
- bpchar_tbl.f2::character(14) AS f214,
- bpchar_tbl.f2 AS f2n
+ SELECT f1,
+ f1::character(14) AS f114,
+ f1::bpchar AS f1n,
+ f2,
+ f2::character(14) AS f214,
+ f2 AS f2n
FROM bpchar_tbl;
explain (verbose, costs off) select * from bpchar_view
@@ -247,7 +297,6 @@ explain (verbose, costs off) select * from bpchar_view
-> Index Scan using bpchar_tbl_f1_key on public.bpchar_tbl
Output: f1, (f1)::character(14), f1, f2, (f2)::character(14), f2
Index Cond: ((bpchar_tbl.f1)::bpchar = 'foo'::bpchar)
- Optimizer: Pivotal Optimizer (GPORCA)
(6 rows)
rollback;
@@ -261,7 +310,7 @@ select random() IN (1, 4, 8.0);
------------------------------------------------------------
Result
Output: (random() = ANY ('{1,4,8}'::double precision[]))
-(2 rows)
+(3 rows)
explain (verbose, costs off)
select random()::int IN (1, 4, 8.0);
@@ -269,7 +318,7 @@ select random()::int IN (1, 4, 8.0);
---------------------------------------------------------------------------
Result
Output: (((random())::integer)::numeric = ANY ('{1,4,8.0}'::numeric[]))
-(2 rows)
+(3 rows)
-- However, if there's not a common supertype for the IN elements,
-- we should instead try to produce "x = v1 OR x = v2 OR ...".
@@ -339,6 +388,55 @@ select return_text_input('a') in ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', '
t
(1 row)
+-- NOT IN
+select return_int_input(1) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1);
+ ?column?
+----------
+ f
+(1 row)
+
+select return_int_input(1) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 0);
+ ?column?
+----------
+ t
+(1 row)
+
+select return_int_input(1) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 2, null);
+ ?column?
+----------
+
+(1 row)
+
+select return_int_input(1) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1, null);
+ ?column?
+----------
+ f
+(1 row)
+
+select return_int_input(1) not in (null, null, null, null, null, null, null, null, null, null, null);
+ ?column?
+----------
+
+(1 row)
+
+select return_int_input(null::int) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1);
+ ?column?
+----------
+
+(1 row)
+
+select return_int_input(null::int) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, null);
+ ?column?
+----------
+
+(1 row)
+
+select return_text_input('a') not in ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j');
+ ?column?
+----------
+ f
+(1 row)
+
rollback;
-- Test with non-strict equality function.
-- We need to create our own type for this.
@@ -365,6 +463,11 @@ begin
end if;
end;
$$ language plpgsql immutable;
+create function myintne(myint, myint) returns bool as $$
+begin
+ return not myinteq($1, $2);
+end;
+$$ language plpgsql immutable;
create operator = (
leftarg = myint,
rightarg = myint,
@@ -375,6 +478,16 @@ create operator = (
join = eqjoinsel,
merges
);
+create operator <> (
+ leftarg = myint,
+ rightarg = myint,
+ commutator = <>,
+ negator = =,
+ procedure = myintne,
+ restrict = eqsel,
+ join = eqjoinsel,
+ merges
+);
create operator class myint_ops
default for type myint using hash as
operator 1 = (myint, myint),
@@ -385,17 +498,37 @@ insert into inttest values(1::myint),(null);
select * from inttest where a in (1::myint,2::myint,3::myint,4::myint,5::myint,6::myint,7::myint,8::myint,9::myint, null);
a
---
-
1
+
(2 rows)
+select * from inttest where a not in (1::myint,2::myint,3::myint,4::myint,5::myint,6::myint,7::myint,8::myint,9::myint, null);
+ a
+---
+(0 rows)
+
+select * from inttest where a not in (0::myint,2::myint,3::myint,4::myint,5::myint,6::myint,7::myint,8::myint,9::myint, null);
+ a
+---
+(0 rows)
+
-- ensure the result matched with the non-hashed version. We simply remove
-- some array elements so that we don't reach the hashing threshold.
select * from inttest where a in (1::myint,2::myint,3::myint,4::myint,5::myint, null);
a
---
- 1
+ 1
(2 rows)
+select * from inttest where a not in (1::myint,2::myint,3::myint,4::myint,5::myint, null);
+ a
+---
+(0 rows)
+
+select * from inttest where a not in (0::myint,2::myint,3::myint,4::myint,5::myint, null);
+ a
+---
+(0 rows)
+
rollback;
diff --git a/src/test/regress/expected/external_table_optimizer.out b/src/test/regress/expected/external_table_optimizer.out
index b5af22445d1..e41a30362cc 100644
--- a/src/test/regress/expected/external_table_optimizer.out
+++ b/src/test/regress/expected/external_table_optimizer.out
@@ -23,7 +23,15 @@
-- m/DETAIL: Found \d+ URLs and \d+ primary segments./
-- s/Found.+//
--
+-- # normalize absolute source paths across environments (also handles file:// URIs)
+-- m|/.+/src/test/regress/|
+-- s|/.+/src/test/regress/|/ABSPATH/src/test/regress/|
+--
-- end_matchsubs
+\getenv abs_srcdir PG_ABS_SRCDIR
+\getenv hostname PG_HOSTNAME
+\set nation_tbl 'file://' :hostname :abs_srcdir '/data/nation.tbl'
+\set region_tbl 'file://' :hostname :abs_srcdir '/data/region.tbl'
CREATE TABLE REG_REGION (R_REGIONKEY INT, R_NAME CHAR(25), R_COMMENT VARCHAR(152)) DISTRIBUTED BY (R_REGIONKEY);
-- --------------------------------------
-- 'file' protocol - (only CREATE, don't SELECT - won't work on distributed installation)
@@ -32,10 +40,10 @@ CREATE EXTERNAL TABLE EXT_NATION ( N_NATIONKEY INTEGER ,
N_NAME CHAR(25) ,
N_REGIONKEY INTEGER ,
N_COMMENT VARCHAR(152))
-location ('file://@hostname@@abs_srcdir@/data/nation.tbl' )
+location (:'nation_tbl' )
FORMAT 'text' (delimiter '|');
CREATE EXTERNAL TABLE EXT_REGION (LIKE REG_REGION)
-location ('file://@hostname@@abs_srcdir@/data/region.tbl' )
+location (:'region_tbl' )
FORMAT 'text' (delimiter '|');
-- Only tables with custom protocol should create dependency, due to a bug there
-- used to be entries created for non custom protocol tables with refobjid=0.
@@ -144,8 +151,9 @@ SELECT * FROM table_env WHERE val LIKE 'GP_QUERY%\%' ESCAPE '&' ORDER BY val ASC
(1 row)
-- ensure squelching on master
+\set lineitem 'cat ' :abs_srcdir '/data/lineitem.csv'
CREATE EXTERNAL WEB TABLE table_master (val TEXT)
- EXECUTE E'cat @abs_srcdir@/data/lineitem.csv' ON MASTER
+ EXECUTE E:'lineitem' ON MASTER
FORMAT 'TEXT' (ESCAPE 'OFF');
BEGIN;
DECLARE _psql_cursor NO SCROLL CURSOR FOR SELECT 1 FROM table_master;
@@ -192,11 +200,12 @@ drop external web table ext_stderr2;
--
-- bad csv (quote must be a single char)
--
+\set whois_file 'gpfdist://' :hostname ':7070/exttab1/whois.csv'
create external table bad_whois (
source_lineno int,
domain_name varchar(350)
)
-location ('gpfdist://@hostname@:7070/exttab1/whois.csv' )
+location (:'whois_file' )
format 'csv' ( header quote as 'ggg');
ERROR: COPY quote must be a single one-byte character
select count(*) from bad_whois;
@@ -208,26 +217,28 @@ ERROR: foreign table "bad_whois" does not exist
--
-- try a bad location
--
+\set badt1_file 'file://' :hostname :abs_srcdir '/data/no/such/place/badt1.tbl'
create external table badt1 (x text)
-location ('file://@hostname@@abs_srcdir@/data/no/such/place/badt1.tbl' )
+location (:'badt1_file' )
format 'text' (delimiter '|');
select * from badt1;
-NOTICE: gfile stat @abs_srcdir@/data/no/such/place/badt1.tbl failure: No such file or directory (seg0 slice1 @hostname@:50000 pid=64819)
-NOTICE: fstream unable to open file @abs_srcdir@/data/no/such/place/badt1.tbl (seg0 slice1 @hostname@:50000 pid=64819)
-ERROR: could not open file "@abs_srcdir@/data/no/such/place/badt1.tbl": 404 file not found (seg0 slice1 @hostname@:50000 pid=64819)
+ERROR: could not open file "/ABSPATH/src/test/regress/data/no/such/place/badt1.tbl": 404 file not found
+NOTICE: fstream unable to open file /ABSPATH/src/test/regress/data/no/such/place/badt1.tbl
+NOTICE: gfile stat /ABSPATH/src/test/regress/data/no/such/place/badt1.tbl failure: No such file or directory
drop external table badt1;
--
-- try a bad protocol
--
+\set badt2_file 'bad_protocol://' :hostname :abs_srcdir '/data/no/such/place/badt2.tbl'
create external table badt2 (x text)
-location ('bad_protocol://@hostname@@abs_srcdir@/data/no/such/place/badt2.tbl' )
+location (:'badt2_file' )
format 'text' (delimiter '|');
ERROR: protocol "bad_protocol" does not exist
--
-- ALTER (partial support)
--
create external table ext (a int, x text)
-location ('file://@hostname@@abs_srcdir@/data/no/such/place/badt1.tbl' )
+location (:'badt1_file' )
format 'text';
alter table ext drop column a; -- should pass
alter external table ext add column a int; -- pass
@@ -249,10 +260,10 @@ ERROR: cannot update foreign table "ext"
insert into ext(x) values(123);
ERROR: foreign table "ext" does not allow inserts
create index ext_index on ext(x); -- should fail
-ERROR: cannot create index on foreign table "ext"
+ERROR: cannot create index on relation "ext"
+DETAIL: This operation is not supported for foreign tables.
drop table ext; -- should fail (wrong object)
ERROR: "ext" is not a table
-HINT: Use DROP FOREIGN TABLE to remove a foreign table.
drop external table ext;
----------------------------------------------------------------------
-- CUSTOM PROTOCOLS
@@ -300,7 +311,7 @@ GRANT INSERT ON PROTOCOL demoprot TO extprotu;
\t on
-- print with tuples-only mode : suppress diff related to user string length.
SELECT ptcname, ptcacl FROM PG_EXTPROTOCOL WHERE ptcname = 'demoprot';
- demoprot | {@curusername@=ar/@curusername@,extprotu=ar/@curusername@}
+ demoprot | {gpadmin=ar/gpadmin,extprotu=ar/gpadmin}
\t off
SET SESSION AUTHORIZATION extprotu;
@@ -408,9 +419,9 @@ DROP EXTERNAL TABLE public.test_ext;
-- positive
create writable external web table wet_pos4(a text, b text) execute 'some command' format 'text';
-- negative
-create writable external table wet_neg1(a text, b text) location('file://@hostname@@abs_srcdir@/badt1.tbl') format 'text';
+\set badt1_file2 'file://' :hostname :abs_srcdir '/badt1.tbl'
+create writable external table wet_neg1(a text, b text) location(:'badt1_file2') format 'text';
ERROR: unsupported URI protocol 'file' for writable external table
-HINT: Writable external tables may use 'gpfdist' or 'gpfdists' URIs only.
create writable external table wet_neg1(a text, b text) location('gpfdist://foo:7070/wet.out', 'gpfdist://foo:7070/wet.out') format 'text';
ERROR: duplicate location uri
LINE 1: ...t, b text) location('gpfdist://foo:7070/wet.out', 'gpfdist:/...
@@ -426,8 +437,9 @@ HINT: Create the table as READABLE instead.
-- scans, because the planner generated plans that used the CTID attribute
-- to implement certain semi-joins. Nowadays, we use generated row IDs in
-- such plans, and don't need CTID for that purpose anymore.
+\set mpp17980_file2 'file://' :hostname :abs_srcdir '/data/mpp17980.data'
CREATE EXTERNAL TABLE ext_mpp17980 ( id int , id1 int , id2 int)
-LOCATION ('file://@hostname@@abs_srcdir@/data/mpp17980.data')
+LOCATION (:'mpp17980_file2')
FORMAT 'CSV' ( DELIMITER ',' NULL ' ');
CREATE TABLE mpp17980 (id int, date date, amt decimal(10,2))
DISTRIBUTED randomly PARTITION BY RANGE (date)
@@ -471,9 +483,10 @@ SELECT ctid, * FROM ext_mpp17980;
DROP EXTERNAL TABLE ext_mpp17980;
DROP TABLE mpp17980;
-COPY (VALUES('1,2'),('1,2,3'),('1,'),('1')) TO '@abs_srcdir@/data/tableless.csv';
+\set tableless_file :abs_srcdir '/data/tableless.csv'
+COPY (VALUES('1,2'),('1,2,3'),('1,'),('1')) TO :'tableless_file';
CREATE TABLE tableless_heap(a int, b int);
-COPY tableless_heap FROM '@abs_srcdir@/data/tableless.csv' CSV LOG ERRORS SEGMENT REJECT LIMIT 10;
+COPY tableless_heap FROM :'tableless_file' CSV LOG ERRORS SEGMENT REJECT LIMIT 10;
NOTICE: found 2 data formatting errors (2 or more input rows), rejected related input data
SELECT relname, linenum, errmsg FROM gp_read_error_log('tableless_heap');
relname | linenum | errmsg
@@ -500,8 +513,9 @@ SELECT relname, linenum, errmsg FROM gp_read_error_log('tableless_heap');
---------+---------+--------
(0 rows)
+\set tableless_file2 'file://' :hostname :abs_srcdir '/data/tableless.csv'
create external table tableless_ext(a int, b int)
-location ('file://@hostname@@abs_srcdir@/data/tableless.csv')
+location (:'tableless_file2')
format 'csv'
log errors segment reject limit 10;
select * from tableless_ext;
@@ -617,8 +631,9 @@ insert into wet_too_many_uris values ('foo', 'bar');
ERROR: external table has more URLs than available primary segments that can write into them (seg2 127.0.0.1:40002 pid=24162)
-- Test for error log functionality
-- Scan with no errors
+\set exttab_file 'file://' :hostname :abs_srcdir '/data/exttab.data'
CREATE EXTERNAL TABLE exttab_basic_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
-- Empty error log
SELECT * FROM gp_read_error_log('exttab_basic_1');
@@ -639,8 +654,9 @@ SELECT * FROM gp_read_error_log('exttab_basic_1');
(0 rows)
-- test ON COORDINATOR without LOG ERRORS, return empty results for all rows error out
+\set cat_exttab 'cat ' :abs_srcdir '/data/exttab.data'
CREATE EXTERNAL WEB TABLE exttab_basic_error_1( i int )
-EXECUTE E'cat @abs_srcdir@/data/exttab.data' ON COORDINATOR
+EXECUTE E:'cat_exttab' ON COORDINATOR
FORMAT 'TEXT' (DELIMITER '|')
SEGMENT REJECT LIMIT 20;
SELECT * FROM exttab_basic_error_1;
@@ -652,7 +668,7 @@ NOTICE: found 10 data formatting errors (10 or more input rows), rejected relat
DROP EXTERNAL TABLE IF EXISTS exttab_basic_error_1;
-- test ON MASTER still works (this syntax will be removed in GPDB8 and forward)
CREATE EXTERNAL WEB TABLE exttab_basic_error_1( i int )
-EXECUTE E'cat @abs_srcdir@/data/exttab.data' ON MASTER
+EXECUTE E:'cat_exttab' ON MASTER
FORMAT 'TEXT' (DELIMITER '|')
SEGMENT REJECT LIMIT 20;
SELECT * FROM exttab_basic_error_1;
@@ -662,8 +678,9 @@ NOTICE: found 10 data formatting errors (10 or more input rows), rejected relat
(0 rows)
-- Some errors without exceeding reject limit
+\set exttab_few_errors_file 'file://' :hostname :abs_srcdir '/data/exttab_few_errors.data'
CREATE EXTERNAL TABLE exttab_basic_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- should not error out as segment reject limit will not be reached
SELECT * FROM exttab_basic_2 order by i;
@@ -688,8 +705,9 @@ select count(*) from gp_read_error_log('exttab_basic_2');
(1 row)
-- Errors with exceeding reject limit
+\set exttab_more_errors_file 'file://' :hostname :abs_srcdir '/data/exttab_more_errors.data'
CREATE EXTERNAL TABLE exttab_basic_3( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
-- should error out as segment reject limit will be reached
SELECT * FROM exttab_basic_3;
@@ -705,10 +723,9 @@ select count(*) > 0 from gp_read_error_log('exttab_basic_3');
-- Insert into another table
CREATE EXTERNAL TABLE exttab_basic_4( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 100;
CREATE TABLE exttab_insert_1 (LIKE exttab_basic_4);
-NOTICE: table doesn't have 'DISTRIBUTED BY' clause, defaulting to distribution columns from LIKE table
-- Insert should go through fine
INSERT INTO exttab_insert_1 SELECT * FROM exttab_basic_4;
NOTICE: found 6 data formatting errors (6 or more input rows), rejected related input data
@@ -721,13 +738,13 @@ select count(*) > 0 from gp_read_error_log('exttab_basic_4');
-- Use the same error log above
CREATE EXTERNAL TABLE exttab_basic_5( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 5;
-- Insert should fail
INSERT INTO exttab_insert_1 select * from exttab_basic_5;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12121)
-CONTEXT: External table exttab_basic_5, line 18 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_basic_5, line 18 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT * from exttab_insert_1 order by i;
i | j
----+-----------
@@ -756,11 +773,9 @@ SELECT count(*) from gp_read_error_log('exttab_basic_5');
-- CTAS
CREATE EXTERNAL TABLE exttab_basic_6( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 100;
CREATE TABLE exttab_ctas_1 as SELECT * FROM exttab_basic_6;
-NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Apache Cloudberry data distribution key for this table.
-HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
NOTICE: found 6 data formatting errors (6 or more input rows), rejected related input data
-- CTAS should go through fine
SELECT * FROM exttab_ctas_1 order by i;
@@ -790,7 +805,7 @@ select count(*) from gp_read_error_log('exttab_basic_6');
(1 row)
CREATE EXTERNAL TABLE exttab_basic_7( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 5;
-- CTAS should fail
CREATE TABLE exttab_ctas_2 AS select * from exttab_basic_7;
@@ -815,7 +830,7 @@ SELECT count(*) from gp_read_error_log('exttab_basic_7');
DROP EXTERNAL TABLE IF EXISTS exttab_error_log;
NOTICE: foreign table "exttab_error_log" does not exist, skipping
CREATE EXTERNAL TABLE exttab_error_log( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
SELECT COUNT(*) FROM exttab_error_log;
count
@@ -838,7 +853,7 @@ SELECT COUNT(*) FROM gp_read_error_log('exttab_error_log');
-- Insert into another table with unique constraints
CREATE EXTERNAL TABLE exttab_constraints_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- Should not error out
SELECT COUNT(*) FROM exttab_constraints_1;
@@ -874,11 +889,11 @@ SELECT COUNT(*) FROM gp_read_error_log('exttab_constraints_1');
-- CTE with segment reject limit reached
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_cte_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit, use the same err table
CREATE EXTERNAL TABLE exttab_cte_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
with cte1 as
(
@@ -886,9 +901,9 @@ SELECT e1.i, e2.j FROM exttab_cte_2 e1, exttab_cte_1 e2
WHERE e1.i = e2.i ORDER BY e1.i
)
SELECT * FROM cte1 ORDER BY cte1.i;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12121)
-CONTEXT: External table exttab_cte_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_cte_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
select count(*) from gp_read_error_log('exttab_cte_2');
count
-------
@@ -934,11 +949,11 @@ NOTICE: found 4 data formatting errors (4 or more input rows), rejected related
-- Check permissions with gp_truncate_error_log and gp_read_error_log
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_permissions_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit
CREATE EXTERNAL TABLE exttab_permissions_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
-- generate some error logs
SELECT COUNT(*) FROM exttab_permissions_1;
@@ -1025,9 +1040,8 @@ NOTICE: resource queue required -- using default resource queue "pg_default"
CREATE DATABASE exttab_db WITH OWNER=exttab_user1;
\c exttab_db
-- generate some error logs in this db
-NOTICE: table "exttab_permissions_1" does not exist, skipping
CREATE EXTERNAL TABLE exttab_permissions_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
SELECT COUNT(*) FROM exttab_permissions_1 e1, exttab_permissions_1 e2;
NOTICE: found 4 data formatting errors (4 or more input rows), rejected related input data
@@ -1078,10 +1092,9 @@ DROP ROLE IF EXISTS errlog_exttab_user4;
CREATE ROLE errlog_exttab_user3 WITH NOSUPERUSER LOGIN;
NOTICE: resource queue required -- using default resource queue "pg_default"
CREATE ROLE errlog_exttab_user4 WITH NOSUPERUSER LOGIN;
-NOTICE: resource queue required -- using default resource queue "pg_default"
-- generate some error logs in this db
CREATE EXTERNAL TABLE exttab_permissions_3( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
SELECT COUNT(*) FROM exttab_permissions_3 e1, exttab_permissions_3 e2;
NOTICE: found 4 data formatting errors (4 or more input rows), rejected related input data
@@ -1148,11 +1161,11 @@ SELECT * FROM gp_read_error_log('exttab_permissions_3');
-- Subqueries reaching segment reject limit
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_subq_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit, use the same err table
CREATE EXTERNAL TABLE exttab_subq_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
SELECT sum(distinct e1.i), sum(distinct e2.i), e1.j FROM
(SELECT i, j FROM exttab_subq_1 WHERE i < 5 ) e1,
@@ -1286,9 +1299,9 @@ SELECT gp_truncate_error_log('exttab_subq_2');
SELECT ( SELECT i FROM exttab_subq_2 WHERE i <= e1.i) as i, e1.j
FROM exttab_subq_2 e1, exttab_subq_1 e2
WHERE e1.i = e2.i;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12217)
-CONTEXT: External table exttab_subq_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_subq_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
-- CSQ
SELECT gp_truncate_error_log('exttab_subq_1');
gp_truncate_error_log
@@ -1306,9 +1319,9 @@ SELECT e1.i , e1.j FROM
exttab_subq_1 e1, exttab_subq_1 e2
WHERE e1.j = e2.j and
e1.i + 1 IN ( SELECT i from exttab_subq_2 WHERE i <= e1.i);
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12226)
-CONTEXT: External table exttab_subq_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_subq_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT COUNT(*) > 0 FROM
(
SELECT * FROM gp_read_error_log('exttab_subq_1')
@@ -1335,9 +1348,9 @@ SELECT gp_truncate_error_log('exttab_subq_2');
SELECT ( SELECT i FROM exttab_subq_2 WHERE i <= e1.i) as i, e1.j
FROM exttab_subq_2 e1, exttab_subq_1 e2
WHERE e1.i = e2.i;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice3 127.0.0.1:40000 pid=12211)
-CONTEXT: External table exttab_subq_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_subq_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT COUNT(*) > 0 FROM
(
SELECT * FROM gp_read_error_log('exttab_subq_1')
@@ -1352,11 +1365,12 @@ SELECT * FROM gp_read_error_log('exttab_subq_2')
-- TRUNCATE / delete / write to error logs within subtransactions
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_subtxs_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit, use the same err table
+\set exttab_more_errors_file2 'file://' :hostname ':' :abs_srcdir '/data/exttab_more_errors.data'
CREATE EXTERNAL TABLE exttab_subtxs_2( i int, j text )
-LOCATION ('file://@hostname@:@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file2') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
-- Populate error logs before transaction
SELECT e1.i, e2.j FROM
@@ -1464,9 +1478,9 @@ SELECT e1.i, e2.j FROM
(SELECT i, j FROM exttab_subtxs_2 WHERE i < 5 ) e1,
(SELECT i, j FROM exttab_subtxs_2 WHERE i < 10) e2
WHERE e1.i = e2.i;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice2 127.0.0.1:40000 pid=12211)
-CONTEXT: External table exttab_subtxs_2, line 7 of file://@hostname@:@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_subtxs_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
COMMIT;
-- Error logs should not have been rolled back.
-- Check that number of errors is greater than 12 instead of checking for
@@ -1487,11 +1501,11 @@ SELECT * FROM gp_read_error_log('exttab_subtxs_2')
-- TRUNCATE error logs within tx , abort transaction
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_txs_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit, use the same err table
CREATE EXTERNAL TABLE exttab_txs_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
-- Populate error log before transaction
SELECT e1.i, e2.j FROM
@@ -1601,9 +1615,9 @@ SELECT e1.i, e2.j FROM
(SELECT i, j FROM exttab_txs_1 WHERE i < 5 ) e1,
(SELECT i, j FROM exttab_txs_2 WHERE i < 10) e2
WHERE e1.i = e2.i;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice2 127.0.0.1:40000 pid=12211)
-CONTEXT: External table exttab_txs_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_txs_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
COMMIT;
-- Additional error rows should have been inserted into the error logs even if the tx is aborted.
-- Truncate of error logs should not be rolled back even if the transaction is aborted. All operation on error logs are persisted.
@@ -1643,11 +1657,11 @@ BEGIN;
-- create an external table that will reach segment reject limit
-- reaches reject limit
CREATE EXTERNAL TABLE exttab_txs_3( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
-- new error log, within segment reject limit
CREATE EXTERNAL TABLE exttab_txs_4( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
SELECT e1.i, e2.j FROM
(SELECT i, j FROM exttab_txs_4 WHERE i < 5 ) e1,
@@ -1673,9 +1687,9 @@ SELECT e1.i, e2.j FROM
(SELECT i, j FROM exttab_txs_3 WHERE i < 5 ) e1,
(SELECT i, j FROM exttab_txs_4 WHERE i < 10) e2
WHERE e1.i = e2.i order by e1.i;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12217)
-CONTEXT: External table exttab_txs_3, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_txs_3, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
COMMIT;
-- Error logs should not exist for these tables that would have been rolled back
SELECT count(*) FROM gp_read_error_log('exttab_txs_3');
@@ -1702,11 +1716,11 @@ LINE 1: SELECT count(*) FROM exttab_txs_4;
-- UDFS with segment reject limit reached
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_udfs_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit, use the same err table
CREATE EXTERNAL TABLE exttab_udfs_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
CREATE OR REPLACE FUNCTION exttab_udfs_func1 ()
RETURNS boolean
@@ -1847,10 +1861,10 @@ SELECT gp_truncate_error_log('exttab_udfs_2');
-- All this should fail, error logs should be populated even if the UDF gets aborted as we persist error rows written within aborted txs.
SELECT * FROM exttab_udfs_func2();
-NOTICE: found 2 data formatting errors (2 or more input rows), rejected related input data
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12226)
-CONTEXT: External table exttab_udfs_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+NOTICE: found 2 data formatting errors (2 or more input rows), rejected related input data
+CONTEXT: External table exttab_udfs_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT COUNT(*) > 0 FROM
(
SELECT * FROM gp_read_error_log('exttab_udfs_1')
@@ -1875,10 +1889,10 @@ SELECT gp_truncate_error_log('exttab_udfs_2');
(1 row)
SELECT exttab_udfs_func2();
-NOTICE: found 2 data formatting errors (2 or more input rows), rejected related input data
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12217)
-CONTEXT: External table exttab_udfs_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+NOTICE: found 2 data formatting errors (2 or more input rows), rejected related input data
+CONTEXT: External table exttab_udfs_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT COUNT(*) > 0 FROM
(
SELECT * FROM gp_read_error_log('exttab_udfs_1')
@@ -1903,10 +1917,10 @@ SELECT gp_truncate_error_log('exttab_udfs_2');
(1 row)
INSERT INTO exttab_udfs_insert_1 SELECT * FROM exttab_udfs_func2();
-NOTICE: found 2 data formatting errors (2 or more input rows), rejected related input data
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12226)
-CONTEXT: External table exttab_udfs_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+NOTICE: found 2 data formatting errors (2 or more input rows), rejected related input data
+CONTEXT: External table exttab_udfs_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT COUNT(*) > 0 FROM
(
SELECT * FROM gp_read_error_log('exttab_udfs_1')
@@ -1931,12 +1945,10 @@ SELECT gp_truncate_error_log('exttab_udfs_2');
(1 row)
CREATE TABLE exttab_udfs_ctas_2 AS SELECT * FROM exttab_udfs_func2();
-NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'exttab_udfs_func2' as the Apache Cloudberry data distribution key for this table.
-HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-NOTICE: found 2 data formatting errors (2 or more input rows), rejected related input data
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12217)
-CONTEXT: External table exttab_udfs_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+NOTICE: found 2 data formatting errors (2 or more input rows), rejected related input data
+CONTEXT: External table exttab_udfs_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT COUNT(*) > 0 FROM
(
SELECT * FROM gp_read_error_log('exttab_udfs_1')
@@ -1961,11 +1973,11 @@ NOTICE: foreign table "exttab_union_1" does not exist, skipping
DROP EXTERNAL TABLE IF EXISTS exttab_union_2;
NOTICE: foreign table "exttab_union_2" does not exist, skipping
CREATE EXTERNAL TABLE exttab_union_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit
CREATE EXTERNAL TABLE exttab_union_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
-- Should error out as exttab_union_2 would reach it's reject limit
SELECT * FROM
@@ -1975,9 +1987,9 @@ UNION
SELECT * FROM exttab_union_2
) FOO
order by FOO.i;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12226)
-CONTEXT: External table exttab_union_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_union_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
-- Error table count
select count(*) > 0 from
(
@@ -1992,9 +2004,7 @@ SELECT * FROM gp_read_error_log('exttab_union_2')
-- Insert into another table, with and without segment reject limits being reached
DROP TABLE IF EXISTS exttab_union_insert_1;
-NOTICE: table "exttab_union_insert_1" does not exist, skipping
CREATE TABLE exttab_union_insert_1 (LIKE exttab_union_1);
-NOTICE: table doesn't have 'DISTRIBUTED BY' clause, defaulting to distribution columns from LIKE table
SELECT gp_truncate_error_log('exttab_union_1');
gp_truncate_error_log
-----------------------
@@ -2011,9 +2021,9 @@ insert into exttab_union_insert_1
SELECT e1.i, e2.j from exttab_union_2 e1 INNER JOIN exttab_union_2 e2 ON e1.i = e2.i
UNION ALL
SELECT e1.i, e2.j from exttab_union_2 e1 INNER JOIN exttab_union_2 e2 ON e1.i = e2.i;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12226)
-CONTEXT: External table exttab_union_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_union_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
-- should return 0 rows
SELECT * from exttab_union_insert_1;
i | j
@@ -2071,14 +2081,13 @@ DROP EXTERNAL TABLE IF EXISTS exttab_views_2 CASCADE;
NOTICE: foreign table "exttab_views_2" does not exist, skipping
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_views_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit, use the same err table
CREATE EXTERNAL TABLE exttab_views_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
DROP VIEW IF EXISTS exttab_views_3;
-NOTICE: view "exttab_views_3" does not exist, skipping
CREATE VIEW exttab_views_3 as
SELECT sum(distinct e1.i) as sum_i, sum(distinct e2.i) as sum_j, e1.j as j FROM
(SELECT i, j FROM exttab_views_1 WHERE i < 5 ) e1,
@@ -2238,11 +2247,11 @@ DROP EXTERNAL TABLE IF EXISTS exttab_windows_2;
NOTICE: foreign table "exttab_windows_2" does not exist, skipping
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_windows_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit
CREATE EXTERNAL TABLE exttab_windows_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
-- without reaching segment reject limit
with cte1 as(
@@ -2326,9 +2335,9 @@ SELECT * FROM cte1 c1, cte2 c2
WHERE c1.i = c2.i
ORDER BY c1.i
limit 5;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice1 127.0.0.1:40000 pid=12357)
-CONTEXT: External table exttab_windows_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_windows_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT COUNT(*) > 0
FROM
(
@@ -2348,11 +2357,11 @@ DROP EXTERNAL TABLE IF EXISTS exttab_limit_2 cascade;
NOTICE: foreign table "exttab_limit_2" does not exist, skipping
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_limit_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
-- reaches reject limit, use the same err table
CREATE EXTERNAL TABLE exttab_limit_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_more_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_more_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 2;
-- Note that even though we use exttab_limit_2 here , the LIMIT 3 will not throw a segment reject limit error
with cte1 as
@@ -2361,9 +2370,9 @@ SELECT e1.i, e2.j FROM exttab_limit_1 e1, exttab_limit_1 e2
WHERE e1.i = e2.i LIMIT 5
)
SELECT * FROM cte1, exttab_limit_2 e3 where cte1.i = e3.i ORDER BY cte1.i LIMIT 3;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice4 127.0.0.1:40000 pid=12211)
-CONTEXT: External table exttab_limit_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_limit_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT count(*) FROM gp_read_error_log('exttab_limit_2');
count
-------
@@ -2437,9 +2446,9 @@ SELECT e1.i, e2.j FROM exttab_limit_1 e1, exttab_limit_1 e2
WHERE e1.i = e2.i LIMIT 3
)
SELECT * FROM cte1, exttab_limit_2 e3 where cte1.i = e3.i ORDER BY cte1.i LIMIT 5;
+DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i
ERROR: segment reject limit reached, aborting operation
-DETAIL: Last error was: invalid input syntax for type integer: "error_1", column i (seg0 slice4 127.0.0.1:40000 pid=12211)
-CONTEXT: External table exttab_limit_2, line 7 of file://@hostname@@abs_srcdir@/data/exttab_more_errors.data, column i
+CONTEXT: External table exttab_limit_2, line 7 of file:/ABSPATH/src/test/regress/data/exttab_more_errors.data, column i
SELECT count(*) > 0 FROM gp_read_error_log('exttab_limit_2');
?column?
----------
@@ -2531,6 +2540,7 @@ SELECT * FROM gp_read_error_log('exttab_limit_2')
-- the data. If there is a valid row within the first 'n' rows specified by
-- this guc, the database continues to load the data.
-- default should be 1000
+\set exttab_first_errors_file 'file://' :hostname :abs_srcdir '/data/exttab_first_errors.data'
SHOW gp_initial_bad_row_limit;
gp_initial_bad_row_limit
--------------------------
@@ -2540,7 +2550,7 @@ SHOW gp_initial_bad_row_limit;
DROP EXTERNAL TABLE IF EXISTS exttab_first_reject_limit_1 cascade;
NOTICE: foreign table "exttab_first_reject_limit_1" does not exist, skipping
CREATE EXTERNAL TABLE exttab_first_reject_limit_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_first_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_first_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 20000;
-- should fail with an appropriate error message
SELECT COUNT(*) FROM exttab_first_reject_limit_1;
@@ -2578,7 +2588,7 @@ SELECT COUNT(*) FROM gp_read_error_log('exttab_first_reject_limit_1');
DROP EXTERNAL TABLE IF EXISTS exttab_first_reject_limit_2;
NOTICE: foreign table "exttab_first_reject_limit_2" does not exist, skipping
CREATE EXTERNAL TABLE exttab_first_reject_limit_2( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_first_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_first_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 500;
-- should report an error saying first rows were rejected
SET gp_initial_bad_row_limit = 2;
@@ -2632,10 +2642,9 @@ DROP EXTERNAL TABLE IF EXISTS exttab_heap_join_1;
NOTICE: foreign table "exttab_heap_join_1" does not exist, skipping
-- does not reach reject limit
CREATE EXTERNAL TABLE exttab_heap_join_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
LOG ERRORS SEGMENT REJECT LIMIT 10;
DROP TABLE IF EXISTS test_ext_heap_join;
-NOTICE: table "test_ext_heap_join" does not exist, skipping
CREATE TABLE test_ext_heap_join( i int, j text);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
@@ -2655,14 +2664,10 @@ SELECT COUNT(*) FROM gp_read_error_log('exttab_heap_join_1');
2
(1 row)
-\! rm @abs_srcdir@/data/tableless.csv
--- start_ignore
-DROP EXTERNAL TABLE IF EXISTS exttab_with_on_coordinator;
-NOTICE: table "exttab_with_on_coordinator" does not exist, skipping
--- end_ignore
+\! rm $PG_ABS_SRCDIR/data/tableless.csv
-- Create external table with on clause
CREATE EXTERNAL TABLE exttab_with_on_coordinator( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') ON COORDINATOR FORMAT 'TEXT' (DELIMITER '|');
+LOCATION (:'exttab_few_errors_file') ON COORDINATOR FORMAT 'TEXT' (DELIMITER '|');
SELECT * FROM exttab_with_on_coordinator;
ERROR: 'ON COORDINATOR' is not supported by this protocol yet
DROP EXTERNAL TABLE IF EXISTS exttab_with_on_coordinator;
@@ -2676,13 +2681,13 @@ NOTICE: foreign table "exttab_with_options" does not exist, skipping
-- end_ignore
-- Create external table with 'OPTIONS'
CREATE EXTERNAL TABLE exttab_with_option_empty( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
OPTIONS ();
CREATE EXTERNAL TABLE exttab_with_option_1( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
OPTIONS (hello 'world');
CREATE EXTERNAL TABLE exttab_with_options( i int, j text )
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab_few_errors.data') FORMAT 'TEXT' (DELIMITER '|')
+LOCATION (:'exttab_few_errors_file') FORMAT 'TEXT' (DELIMITER '|')
OPTIONS (hello 'world', bonjour 'again', nihao 'again and again' );
\d exttab_with_options
Foreign table "public.exttab_with_options"
@@ -2690,7 +2695,7 @@ OPTIONS (hello 'world', bonjour 'again', nihao 'again and again' );
--------+---------+-----------+----------+---------+-------------
i | integer | | | |
j | text | | | |
-FDW options: (format 'text', delimiter '|', "null" E'\\N', escape E'\\', hello 'world', bonjour 'again', nihao 'again and again', format_type 't', location_uris 'file://@hostname@@abs_srcdir@/data/exttab_few_errors.data', execute_on 'ALL_SEGMENTS', log_errors 'f', encoding '6', is_writable 'false')
+FDW options: (format 'text', delimiter '|', "null" E'\\N', escape E'\\', hello 'world', bonjour 'again', nihao 'again and again', format_type 't', location_uris 'file:/ABSPATH/src/test/regress/data/exttab_few_errors.data', execute_on 'ALL_SEGMENTS', log_errors 'f', encoding '6', is_writable 'false')
\d exttab_with_option_empty
Foreign table "public.exttab_with_option_empty"
@@ -2698,7 +2703,7 @@ FDW options: (format 'text', delimiter '|', "null" E'\\N', escape E'\\', hello '
--------+---------+-----------+----------+---------+-------------
i | integer | | | |
j | text | | | |
-FDW options: (format 'text', delimiter '|', "null" E'\\N', escape E'\\', format_type 't', location_uris 'file://@hostname@@abs_srcdir@/data/exttab_few_errors.data', execute_on 'ALL_SEGMENTS', log_errors 'f', encoding '6', is_writable 'false')
+FDW options: (format 'text', delimiter '|', "null" E'\\N', escape E'\\', format_type 't', location_uris 'file:/ABSPATH/src/test/regress/data/exttab_few_errors.data', execute_on 'ALL_SEGMENTS', log_errors 'f', encoding '6', is_writable 'false')
DROP EXTERNAL TABLE IF EXISTS exttab_with_option_empty;
DROP EXTERNAL TABLE IF EXISTS exttab_with_option_1;
@@ -2716,16 +2721,22 @@ DROP EXTERNAL TABLE IF EXISTS tbl_wet_csv5;
NOTICE: foreign table "tbl_wet_csv5" does not exist, skipping
-- end_ignore
-- Create writable external table with AS for DELIMITER , NULL, ESCAPE
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv1 (a int, b text) EXECUTE 'cat > @abs_srcdir@/data/wet_csv1.tbl' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ');
+\set wet_csv1_file 'cat > ' :abs_srcdir '/data/wet_csv1.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv1 (a int, b text) EXECUTE :'wet_csv1_file' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ');
-- Create writable external table without AS for DELIMITER , NULL, ESCAPE
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv2 (a int, b text) EXECUTE 'cat > @abs_srcdir@/data/wet_csv2.tbl' FORMAT 'CSV' (DELIMITER AS ',' NULL 'null' ESCAPE ' ');
+\set wet_csv2_file 'cat > ' :abs_srcdir '/data/wet_csv2.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv2 (a int, b text) EXECUTE :'wet_csv2_file' FORMAT 'CSV' (DELIMITER AS ',' NULL 'null' ESCAPE ' ');
-- Create writable external table with double quotes
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv3 (a int, b text) EXECUTE 'cat > @abs_srcdir@/data/wet_csv3.tbl' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ' QUOTE AS '"');
+\set wet_csv3_file 'cat > ' :abs_srcdir '/data/wet_csv3.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv3 (a int, b text) EXECUTE :'wet_csv3_file' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ' QUOTE AS '"');
-- Create writable external table with single quotes
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv4 (a int, b text) EXECUTE 'cat > @abs_srcdir@/data/wet_csv4.tbl' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ' QUOTE AS '''');
+\set wet_csv4_file 'cat > ' :abs_srcdir '/data/wet_csv4.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv4 (a int, b text) EXECUTE :'wet_csv4_file' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ' QUOTE AS '''');
-- Create writable external table with force quote
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv5 (a int, b text) EXECUTE 'cat > @abs_srcdir@/data/wet_csv5.tbl' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ' QUOTE AS '"' FORCE QUOTE b);
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv6 (a int, b text, c text) EXECUTE 'cat > @abs_srcdir@/data/wet_csv6.tbl' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ' QUOTE AS '"' FORCE QUOTE *);
+\set wet_csv5_file 'cat > ' :abs_srcdir '/data/wet_csv5.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv5 (a int, b text) EXECUTE :'wet_csv5_file' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ' QUOTE AS '"' FORCE QUOTE b);
+\set wet_csv6_file 'cat > ' :abs_srcdir '/data/wet_csv6.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_csv6 (a int, b text, c text) EXECUTE :'wet_csv6_file' FORMAT 'CSV' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ' QUOTE AS '"' FORCE QUOTE *);
INSERT INTO tbl_wet_csv1 VALUES (generate_series(1,256), 'test_1');
INSERT INTO tbl_wet_csv2 VALUES (generate_series(1,256), 'test_2');
INSERT INTO tbl_wet_csv3 VALUES (generate_series(1,256), 'test_3');
@@ -2738,20 +2749,15 @@ DROP EXTERNAL TABLE IF EXISTS tbl_wet_csv3;
DROP EXTERNAL TABLE IF EXISTS tbl_wet_csv4;
DROP EXTERNAL TABLE IF EXISTS tbl_wet_csv5;
DROP EXTERNAL TABLE IF EXISTS tbl_wet_csv6;
--- start_ignore
-DROP EXTERNAL TABLE IF EXISTS tbl_wet_text1;
-NOTICE: table "tbl_wet_text1" does not exist, skipping
-DROP EXTERNAL TABLE IF EXISTS tbl_wet_text2;
-NOTICE: table "tbl_wet_text2" does not exist, skipping
-DROP EXTERNAL TABLE IF EXISTS tbl_wet_text3;
-NOTICE: table "tbl_wet_text3" does not exist, skipping
--- end_ignore
-- Create writable external table with AS for DELIMITER , NULL, ESCAPE
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_text1 (a int, b text) EXECUTE 'cat > @abs_srcdir@/data/wet_text1.tbl' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ');
+\set wet_text1_file 'cat > ' :abs_srcdir '/data/wet_text1.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_text1 (a int, b text) EXECUTE :'wet_text1_file' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ');
-- Create writable external table without AS for DELIMITER , NULL, ESCAPE
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_text2 (a int, b text) EXECUTE 'cat > @abs_srcdir@/data/wet_text2.tbl' FORMAT 'TEXT' (DELIMITER AS ',' NULL 'null' ESCAPE ' ');
+\set wet_text2_file 'cat > ' :abs_srcdir '/data/wet_text2.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_text2 (a int, b text) EXECUTE :'wet_text2_file' FORMAT 'TEXT' (DELIMITER AS ',' NULL 'null' ESCAPE ' ');
-- Create writable external table with ESCAPE OFF
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_text3 (a int, b text) EXECUTE 'cat > @abs_srcdir@/data/wet_text3.tbl' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE 'OFF');
+\set wet_text3_file 'cat > ' :abs_srcdir '/data/wet_text3.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_text3 (a int, b text) EXECUTE :'wet_text3_file' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE 'OFF');
INSERT INTO tbl_wet_text1 VALUES (generate_series(1,256), 'test_1');
INSERT INTO tbl_wet_text2 VALUES (generate_series(1,256), 'test_2');
INSERT INTO tbl_wet_text3 VALUES (generate_series(1,256), 'test_3');
@@ -2772,9 +2778,12 @@ NOTICE: table "tbl_wet_syntax3" does not exist, skipping
-- end_ignore
CREATE TABLE test_dp1 (a int, b text) DISTRIBUTED RANDOMLY;
CREATE TABLE test_dp2 (a int, b text) DISTRIBUTED BY (b);
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_syntax1 (like test_dp1) EXECUTE 'cat > @abs_srcdir@/data/wet_syntax1.tbl' FORMAT 'TEXT' (DELIMITER '|' ) DISTRIBUTED BY (a);
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_syntax2 (like test_dp2) EXECUTE 'cat > @abs_srcdir@/data/wet_syntax2.tbl' FORMAT 'TEXT' (DELIMITER '|' ) DISTRIBUTED BY (a);
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_syntax3 (like test_dp2) EXECUTE 'cat > @abs_srcdir@/data/wet_syntax3.tbl' FORMAT 'TEXT' (DELIMITER '|' ) DISTRIBUTED RANDOMLY;
+\set wet_syntax1_file 'cat > ' :abs_srcdir '/data/wet_syntax1.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_syntax1 (like test_dp1) EXECUTE :'wet_syntax1_file' FORMAT 'TEXT' (DELIMITER '|' ) DISTRIBUTED BY (a);
+\set wet_syntax2_file 'cat > ' :abs_srcdir '/data/wet_syntax2.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_syntax2 (like test_dp2) EXECUTE :'wet_syntax2_file' FORMAT 'TEXT' (DELIMITER '|' ) DISTRIBUTED BY (a);
+\set wet_syntax3_file 'cat > ' :abs_srcdir '/data/wet_syntax3.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_syntax3 (like test_dp2) EXECUTE :'wet_syntax3_file' FORMAT 'TEXT' (DELIMITER '|' ) DISTRIBUTED RANDOMLY;
INSERT INTO tbl_wet_syntax1 VALUES (generate_series(1,256), 'test_1');
INSERT INTO tbl_wet_syntax2 VALUES (generate_series(1,256), 'test_2');
INSERT INTO tbl_wet_syntax3 VALUES (generate_series(1,256), 'test_3');
@@ -2793,8 +2802,8 @@ CREATE TABLE table_execute (id integer, name varchar(40)) DISTRIBUTED RANDOMLY;
INSERT INTO table_execute VALUES (100, 'name_1');
INSERT INTO table_execute VALUES (200, 'name_2');
INSERT INTO table_execute VALUES (300, 'name_3');
-CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_execute (like table_execute) EXECUTE 'cat > @abs_srcdir@/data/wet_execute.tbl' FORMAT 'TEXT' (DELIMITER '|' );
-NOTICE: table doesn't have 'DISTRIBUTED BY' clause, defaulting to distribution columns from LIKE table
+\set wet_execute_file 'cat > ' :abs_srcdir '/data/wet_execute.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE tbl_wet_execute (like table_execute) EXECUTE :'wet_execute_file' FORMAT 'TEXT' (DELIMITER '|' );
INSERT INTO tbl_wet_execute SELECT * from table_execute ;
DROP TABLE IF EXISTS table_execute;
DROP EXTERNAL TABLE IF EXISTS tbl_wet_execute;
@@ -2823,17 +2832,17 @@ select distinct ptcname from (
-- drop temp external protocols
DROP PROTOCOL if exists demoprot;
-NOTICE: protocol "demoprot" does not exist, skipping
DROP PROTOCOL if exists demoprot2;
-- create external protocol with a serial type column
+\set serial_file 'file://' :hostname :abs_srcdir '/data/no/such/place/serial.tbl'
CREATE EXTERNAL TABLE SERIAL (a serial, x text)
-LOCATION ('file://@hostname@@abs_srcdir@/data/no/such/place/serial.tbl')
+LOCATION (:'serial_file')
FORMAT 'csv';
-- drop temp external table
DROP EXTERNAL TABLE IF EXISTS serial;
-- External table query within plpgSQL function get error
CREATE EXTERNAL TABLE exttab_error_context_callback(c1 int, c2 int)
-LOCATION ('file://@hostname@@abs_srcdir@/data/exttab.data') FORMAT 'TEXT';
+LOCATION (:'exttab_file') FORMAT 'TEXT';
CREATE or REPLACE FUNCTION exttab_error_context_callback_func()
RETURNS SETOF INTEGER
AS
@@ -2860,8 +2869,9 @@ DROP EXTERNAL TABLE exttab_error_context_callback;
-- --------------------------------------
-- Encoding
-- --------------------------------------
+\set latin1_encoding_file 'file://' :hostname :abs_srcdir '/data/latin1_encoding.csv'
CREATE EXTERNAL TABLE encoding_issue (num int, word text)
-LOCATION ('file://@hostname@@abs_srcdir@/data/latin1_encoding.csv')
+LOCATION (:'latin1_encoding_file')
FORMAT 'CSV' ENCODING 'LATIN1';
SELECT * FROM encoding_issue WHERE num = 4;
num | word
@@ -2870,8 +2880,9 @@ SELECT * FROM encoding_issue WHERE num = 4;
(1 row)
COPY (SELECT * FROM encoding_issue) TO '/tmp/latin1_encoding.csv' WITH (FORMAT 'csv', ENCODING 'LATIN1');
+\set latin1_encoding_file2 'file://' :hostname '/tmp/latin1_encoding.csv'
CREATE EXTERNAL TABLE encoding_issue2 (num int, word text)
-LOCATION ('file://@hostname@/tmp/latin1_encoding.csv')
+LOCATION (:'latin1_encoding_file2')
FORMAT 'CSV' ENCODING 'LATIN1';
SELECT * FROM encoding_issue2 WHERE num = 5;
num | word
@@ -4754,6 +4765,7 @@ line_delim=E'\n'
;
drop external table large_custom_format_definitions;
-- Incomplete external data file
+\set incomplete_formatter_data 'file://' :hostname :abs_srcdir '/data/incomplete_formatter_data.tbl'
CREATE OR REPLACE FUNCTION gpformatter() RETURNS record
AS '$libdir/gpformatter.so', 'formatter_import'
LANGUAGE C STABLE;
@@ -4763,7 +4775,7 @@ CREATE READABLE EXTERNAL TABLE tbl_ext_gpformatter (
d1 text
)
LOCATION (
- 'file://@hostname@@abs_srcdir@/data/incomplete_formatter_data.tbl'
+ :'incomplete_formatter_data'
)
FORMAT 'CUSTOM' (formatter='gpformatter');
SELECT * FROM tbl_ext_gpformatter;
@@ -4892,14 +4904,14 @@ EXPLAIN SELECT
FROM ext_subplan_t1;
QUERY PLAN
----------------------------------------------------------------------------------------------------
- Foreign Scan on ext_subplan_t1 (cost=0.00..449.70 rows=1000000 width=4)
+ Foreign Scan on ext_subplan_t1 (cost=0.00..451.90 rows=1000000 width=4)
SubPlan 1
- -> Limit (cost=0.00..65800465.14 rows=1 width=4)
- -> Result (cost=0.00..65800461.14 rows=1 width=4)
+ -> Limit (cost=0.00..65800467.34 rows=1 width=4)
+ -> Result (cost=0.00..65800463.34 rows=1 width=4)
Filter: (ext_subplan_t2.c1 = ext_subplan_t1.c1)
- -> Materialize (cost=0.00..461.14 rows=1000000 width=4)
- -> Foreign Scan on ext_subplan_t2 (cost=0.00..449.70 rows=1000000 width=4)
- Optimizer: Pivotal Optimizer (GPORCA)
+ -> Materialize (cost=0.00..463.34 rows=1000000 width=4)
+ -> Foreign Scan on ext_subplan_t2 (cost=0.00..451.90 rows=1000000 width=4)
+ Optimizer: GPORCA
(8 rows)
SELECT
@@ -4945,7 +4957,8 @@ DROP TABLE test_part_integrity;
CREATE WRITABLE EXTERNAL WEB TABLE ext_dist_repl(a int, b int) EXECUTE 'some command' FORMAT 'TEXT' DISTRIBUTED REPLICATED;
ERROR: external tables can't have DISTRIBUTED REPLICATED clause
-- Testing altering the distribution policy of external tables.
-CREATE WRITABLE EXTERNAL WEB TABLE ext_w_dist(a int, b int) EXECUTE 'cat > @abs_srcdir@/data/ext_w_dist.tbl' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ') DISTRIBUTED BY (a);
+\set ext_w_dist_file 'cat > ' :abs_srcdir '/data/ext_w_dist.tbl'
+CREATE WRITABLE EXTERNAL WEB TABLE ext_w_dist(a int, b int) EXECUTE :'ext_w_dist_file' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ') DISTRIBUTED BY (a);
ALTER TABLE ext_w_dist SET WITH (reorganize=true); -- should error out if forcing reorganize
ERROR: cannot reorganize external table "ext_w_dist"
SELECT policytype, distkey FROM gp_distribution_policy WHERE localoid = 'ext_w_dist'::regclass;
@@ -4978,8 +4991,9 @@ ERROR: cannot set distribution policy of readable external table "ext_r_dist"
-- Testing external table as the partition child.
CREATE TABLE part_root(a int) PARTITION BY RANGE(a);
CREATE TABLE part_child (LIKE part_root);
-CREATE EXTERNAL WEB TABLE part_ext_r(a int) EXECUTE 'cat > @abs_srcdir@/data/part_ext.tbl' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ');
-CREATE WRITABLE EXTERNAL WEB TABLE part_ext_w(a int, b int) EXECUTE 'cat > @abs_srcdir@/data/part_ext.tbl' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ') DISTRIBUTED BY (a);
+\set part_ext_file 'cat > ' :abs_srcdir '/data/part_ext.tbl'
+CREATE EXTERNAL WEB TABLE part_ext_r(a int) EXECUTE :'part_ext_file' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ');
+CREATE WRITABLE EXTERNAL WEB TABLE part_ext_w(a int, b int) EXECUTE :'part_ext_file' FORMAT 'TEXT' (DELIMITER AS '|' NULL AS 'null' ESCAPE AS ' ') DISTRIBUTED BY (a);
ALTER TABLE part_root ATTACH PARTITION part_child FOR VALUES FROM (0) TO (10);
ALTER TABLE part_root ATTACH PARTITION part_ext_r FOR VALUES FROM (10) TO (20);
NOTICE: partition constraints are not validated when attaching a readable external table
@@ -5001,14 +5015,17 @@ SELECT policytype, distkey FROM gp_distribution_policy WHERE localoid = 'part_ex
DROP TABLE part_root;
-- check logerrors value of pg_exttable
+\set ext_fasle_file 'file://' :hostname :abs_srcdir '/data/ext_fasle.tbl'
+\set ext_true_file 'file://' :hostname :abs_srcdir '/data/ext_true.tbl'
+\set ext_persistently_file 'file://' :hostname :abs_srcdir '/data/ext_persistently.tbl'
CREATE EXTERNAL TABLE ext_false (c INT)
-location ('file://@hostname@@abs_srcdir@/data/ext_fasle.tbl' )
+location (:'ext_fasle_file' )
FORMAT 'text' (delimiter '|');
CREATE EXTERNAL TABLE ext_true (c INT)
-location ('file://@hostname@@abs_srcdir@/data/ext_true.tbl' )
+location (:'ext_true_file' )
FORMAT 'text' (delimiter '|') LOG ERRORS SEGMENT REJECT LIMIT 100;
CREATE EXTERNAL TABLE ext_persistently (c INT)
-location ('file://@hostname@@abs_srcdir@/data/ext_persistently.tbl' )
+location (:'ext_persistently_file' )
FORMAT 'text' (delimiter '|') LOG ERRORS PERSISTENTLY SEGMENT REJECT LIMIT 100;
SELECT logerrors, options from pg_exttable a, pg_class b where a.reloid = b.oid and b.relname = 'ext_false';
logerrors | options
diff --git a/src/test/regress/expected/external_table_persistent_error_log_optimizer.out b/src/test/regress/expected/external_table_persistent_error_log_optimizer.out
index cb9249e6d0a..2151b746fff 100644
--- a/src/test/regress/expected/external_table_persistent_error_log_optimizer.out
+++ b/src/test/regress/expected/external_table_persistent_error_log_optimizer.out
@@ -25,9 +25,13 @@
--
-- end_matchsubs
SET optimizer_trace_fallback=on;
-COPY (VALUES('1,2'),('1,2,3'),('1,'),('1')) TO '@abs_srcdir@/data/tableerr.csv';
+\getenv abs_srcdir PG_ABS_SRCDIR
+\getenv hostname PG_HOSTNAME
+\set tableerr_file :abs_srcdir '/data/tableerr.csv'
+COPY (VALUES('1,2'),('1,2,3'),('1,'),('1')) TO :'tableerr_file';
+\set tableerr_file2 'file://' :hostname :abs_srcdir '/data/tableerr.csv'
create external table ext_error_persistent(a int, b int)
-location ('file://@hostname@@abs_srcdir@/data/tableerr.csv')
+location (:'tableerr_file2')
format 'csv'
log errors persistently segment reject limit 10;
select * from ext_error_persistent;
@@ -196,7 +200,7 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
-- with out the error_log_persistent option, which will use normal error log.
create external table ext_error_persistent(a int, b int)
-location ('file://@hostname@@abs_srcdir@/data/tableerr.csv')
+location (:'tableerr_file2')
format 'csv'
log errors segment reject limit 10;
select * from ext_error_persistent;
@@ -258,9 +262,10 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
---------+---------+--------
(0 rows)
+\set bytea_file 'file://' :hostname :abs_srcdir '/data/bytea.data'
CREATE EXTERNAL TABLE ext_bytea (id int, content bytea)
LOCATION (
- 'file://@hostname@@abs_srcdir@/data/bytea.data'
+ :'bytea_file'
) FORMAT 'CSV'
OPTIONS (error_log_persistent 'true')
LOG ERRORS SEGMENT REJECT LIMIT 5;
@@ -296,4 +301,4 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
---------+---------+--------
(0 rows)
-\! rm @abs_srcdir@/data/tableerr.csv
+\! rm $PG_ABS_SRCDIR/data/tableerr.csv
diff --git a/src/test/regress/expected/external_table_union_all_optimizer.out b/src/test/regress/expected/external_table_union_all_optimizer.out
index ca325443c2a..fb0b59d9c8d 100644
--- a/src/test/regress/expected/external_table_union_all_optimizer.out
+++ b/src/test/regress/expected/external_table_union_all_optimizer.out
@@ -1,10 +1,15 @@
SET optimizer_trace_fallback=on;
-- Test external table as left child of union all with replicated table
+\getenv abs_srcdir PG_ABS_SRCDIR
+\getenv hostname PG_HOSTNAME
+\set location1_file 'file://' :hostname :abs_srcdir '/data/location1.csv'
+\set location2_file 'file://' :hostname :abs_srcdir '/data/location2.csv'
+\set location3_file 'file://' :hostname :abs_srcdir '/data/location3.csv'
CREATE EXTERNAL TABLE multilocation_external_table(a INTEGER)
-location ('file://@hostname@@abs_srcdir@/data/location1.csv', 'file://@hostname@@abs_srcdir@/data/location2.csv', 'file://@hostname@@abs_srcdir@/data/location3.csv')
+location (:'location1_file', :'location2_file', :'location3_file')
ON ALL FORMAT 'text';
CREATE EXTERNAL TABLE one_external_table(a INTEGER)
-location ('file://@hostname@@abs_srcdir@/data/location2.csv')
+location (:'location2_file')
ON SEGMENT 2 FORMAT 'text';
CREATE TABLE simple_replicated_table(a integer) DISTRIBUTED REPLICATED;
INSERT INTO simple_replicated_table VALUES (1);
diff --git a/src/test/regress/expected/function_extensions_optimizer.out b/src/test/regress/expected/function_extensions_optimizer.out
index 8f875434085..ed852c3148c 100644
--- a/src/test/regress/expected/function_extensions_optimizer.out
+++ b/src/test/regress/expected/function_extensions_optimizer.out
@@ -391,50 +391,34 @@ explain select * from srf_testtab, test_srf() where test_srf = srf_testtab.t;
-- Test ALTER FUNCTION, and that \df displays the EXECUTE ON correctly
\df+ test_srf
- List of functions
- Schema | Name | Result data type | Argument data types | Type | Data access | Execute on | Volatility | Parallel | Owner | Security | Access privileges | Language | Source code | Description
---------+----------+------------------+---------------------+------+-------------+------------+------------+----------+-------------+----------+-------------------+----------+----------------------+-------------
- public | test_srf | SETOF text | | func | no sql | any | immutable | unsafe | srftestuser | invoker | | plpgsql | +|
- | | | | | | | | | | | | | begin +|
- | | | | | | | | | | | | | return next 'foo';+|
- | | | | | | | | | | | | | end; +|
- | | | | | | | | | | | | | |
+ List of functions
+ Schema | Name | Result data type | Argument data types | Type | Data access | Execute on | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description
+--------+----------+------------------+---------------------+------+-------------+------------+------------+----------+-------------+----------+-------------------+----------+---------------+-------------
+ public | test_srf | SETOF text | | func | no sql | any | immutable | unsafe | srftestuser | invoker | | plpgsql | |
(1 row)
alter function test_srf() EXECUTE ON COORDINATOR;
\df+ test_srf
- List of functions
- Schema | Name | Result data type | Argument data types | Type | Data access | Execute on | Volatility | Parallel | Owner | Security | Access privileges | Language | Source code | Description
---------+----------+------------------+---------------------+------+-------------+-------------+------------+----------+-------------+----------+-------------------+----------+----------------------+-------------
- public | test_srf | SETOF text | | func | no sql | coordinator | immutable | unsafe | srftestuser | invoker | | plpgsql | +|
- | | | | | | | | | | | | | begin +|
- | | | | | | | | | | | | | return next 'foo';+|
- | | | | | | | | | | | | | end; +|
- | | | | | | | | | | | | | |
+ List of functions
+ Schema | Name | Result data type | Argument data types | Type | Data access | Execute on | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description
+--------+----------+------------------+---------------------+------+-------------+-------------+------------+----------+-------------+----------+-------------------+----------+---------------+-------------
+ public | test_srf | SETOF text | | func | no sql | coordinator | immutable | unsafe | srftestuser | invoker | | plpgsql | |
(1 row)
alter function test_srf() EXECUTE ON ALL SEGMENTS;
\df+ test_srf
- List of functions
- Schema | Name | Result data type | Argument data types | Type | Data access | Execute on | Volatility | Parallel | Owner | Security | Access privileges | Language | Source code | Description
---------+----------+------------------+---------------------+------+-------------+--------------+------------+----------+-------------+----------+-------------------+----------+----------------------+-------------
- public | test_srf | SETOF text | | func | no sql | all segments | immutable | unsafe | srftestuser | invoker | | plpgsql | +|
- | | | | | | | | | | | | | begin +|
- | | | | | | | | | | | | | return next 'foo';+|
- | | | | | | | | | | | | | end; +|
- | | | | | | | | | | | | | |
+ List of functions
+ Schema | Name | Result data type | Argument data types | Type | Data access | Execute on | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description
+--------+----------+------------------+---------------------+------+-------------+--------------+------------+----------+-------------+----------+-------------------+----------+---------------+-------------
+ public | test_srf | SETOF text | | func | no sql | all segments | immutable | unsafe | srftestuser | invoker | | plpgsql | |
(1 row)
alter function test_srf() EXECUTE ON ANY;
\df+ test_srf
- List of functions
- Schema | Name | Result data type | Argument data types | Type | Data access | Execute on | Volatility | Parallel | Owner | Security | Access privileges | Language | Source code | Description
---------+----------+------------------+---------------------+------+-------------+------------+------------+----------+-------------+----------+-------------------+----------+----------------------+-------------
- public | test_srf | SETOF text | | func | no sql | any | immutable | unsafe | srftestuser | invoker | | plpgsql | +|
- | | | | | | | | | | | | | begin +|
- | | | | | | | | | | | | | return next 'foo';+|
- | | | | | | | | | | | | | end; +|
- | | | | | | | | | | | | | |
+ List of functions
+ Schema | Name | Result data type | Argument data types | Type | Data access | Execute on | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description
+--------+----------+------------------+---------------------+------+-------------+------------+------------+----------+-------------+----------+-------------------+----------+---------------+-------------
+ public | test_srf | SETOF text | | func | no sql | any | immutable | unsafe | srftestuser | invoker | | plpgsql | |
(1 row)
DROP FUNCTION test_srf();
diff --git a/src/test/regress/expected/generated_optimizer.out b/src/test/regress/expected/generated_optimizer.out
index c1a1eeec122..34051030819 100644
--- a/src/test/regress/expected/generated_optimizer.out
+++ b/src/test/regress/expected/generated_optimizer.out
@@ -89,6 +89,9 @@ LINE 1: ..._3 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (c * 2) STO...
-- generation expression must be immutable
CREATE TABLE gtest_err_4 (a int PRIMARY KEY, b double precision GENERATED ALWAYS AS (random()) STORED);
ERROR: generation expression is not immutable
+-- ... but be sure that the immutability test is accurate
+CREATE TABLE gtest2 (a int, b text GENERATED ALWAYS AS (a || ' sec') STORED);
+DROP TABLE gtest2;
-- cannot have default/identity and generated
CREATE TABLE gtest_err_5a (a int PRIMARY KEY, b int DEFAULT 5 GENERATED ALWAYS AS (a * 2) STORED);
ERROR: both default and generation expression specified for column "b" of table "gtest_err_5a"
@@ -227,6 +230,28 @@ SELECT * FROM gtest1 ORDER BY a;
3 | 6
(1 row)
+-- test MERGE
+CREATE TABLE gtestm (
+ id int PRIMARY KEY,
+ f1 int,
+ f2 int,
+ f3 int GENERATED ALWAYS AS (f1 * 2) STORED,
+ f4 int GENERATED ALWAYS AS (f2 * 2) STORED
+);
+INSERT INTO gtestm VALUES (1, 5, 100);
+MERGE INTO gtestm t USING (VALUES (1, 10), (2, 20)) v(id, f1) ON t.id = v.id
+ WHEN MATCHED THEN UPDATE SET f1 = v.f1
+ WHEN NOT MATCHED THEN INSERT VALUES (v.id, v.f1, 200);
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+SELECT * FROM gtestm ORDER BY id;
+ id | f1 | f2 | f3 | f4
+----+----+-----+----+-----
+ 1 | 10 | 100 | 20 | 200
+ 2 | 20 | 200 | 40 | 400
+(2 rows)
+
+DROP TABLE gtestm;
-- views
CREATE VIEW gtest1v AS SELECT * FROM gtest1;
SELECT * FROM gtest1v;
@@ -324,11 +349,27 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
4 | 8
(2 rows)
+-- can't have generated column that is a child of normal column
CREATE TABLE gtest_normal (a int, b int);
-CREATE TABLE gtest_normal_child (a int, b int GENERATED ALWAYS AS (a * 2) STORED) INHERITS (gtest_normal);
+CREATE TABLE gtest_normal_child (a int, b int GENERATED ALWAYS AS (a * 2) STORED) INHERITS (gtest_normal); -- error
NOTICE: merging column "a" with inherited definition
NOTICE: merging column "b" with inherited definition
-\d gtest_normal_child
+ERROR: child column "b" specifies generation expression
+HINT: A child table column cannot be generated unless its parent column is.
+CREATE TABLE gtest_normal_child (a int, b int GENERATED ALWAYS AS (a * 2) STORED);
+ALTER TABLE gtest_normal_child INHERIT gtest_normal; -- error
+ERROR: column "b" in child table must not be a generated column
+DROP TABLE gtest_normal, gtest_normal_child;
+-- test inheritance mismatches between parent and child
+CREATE TABLE gtestx (x int, b int DEFAULT 10) INHERITS (gtest1); -- error
+NOTICE: merging column "b" with inherited definition
+ERROR: column "b" inherits from generated column but specifies default
+CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS IDENTITY) INHERITS (gtest1); -- error
+NOTICE: merging column "b" with inherited definition
+ERROR: column "b" inherits from generated column but specifies identity
+CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS (a * 22) STORED) INHERITS (gtest1); -- ok, overrides parent
+NOTICE: merging column "b" with inherited definition
+\d+ gtestx
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
@@ -340,84 +381,100 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
-DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
-INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
- Table "public.gtest_normal_child"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+------------------------------------
- a | integer | | |
- b | integer | | | generated always as (a * 2) stored
-Inherits: gtest_normal
-Distributed by: (a)
-
-INSERT INTO gtest_normal (a) VALUES (1);
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
-DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Inherited tables
-INSERT INTO gtest_normal_child (a) VALUES (2);
-SELECT * FROM gtest_normal;
-INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
-DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Inherited tables
- a | b
----+---
- 1 |
- 2 | 4
-(2 rows)
-
-CREATE TABLE gtest_normal_child2 (a int, b int GENERATED ALWAYS AS (a * 3) STORED);
-ALTER TABLE gtest_normal_child2 INHERIT gtest_normal;
-INSERT INTO gtest_normal_child2 (a) VALUES (3);
-SELECT * FROM gtest_normal;
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
-DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Inherited tables
- a | b
----+---
- 1 |
- 2 | 4
- 3 | 9
-(3 rows)
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ Table "public.gtestx"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+-------------------------------------+---------+--------------+-------------
+ a | integer | | not null | | plain | |
+ b | integer | | | generated always as (a * 22) stored | plain | |
+ x | integer | | | | plain | |
+Inherits: gtest1
--- test inheritance mismatches between parent and child
-CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS (a * 22) STORED) INHERITS (gtest1); -- error
-NOTICE: merging column "b" with inherited definition
-ERROR: child column "b" specifies generation expression
-HINT: Omit the generation expression in the definition of the child table column to inherit the generation expression from the parent table.
-CREATE TABLE gtestx (x int, b int DEFAULT 10) INHERITS (gtest1); -- error
-NOTICE: merging column "b" with inherited definition
-ERROR: column "b" inherits from generated column but specifies default
-CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS IDENTITY) INHERITS (gtest1); -- error
-NOTICE: merging column "b" with inherited definition
-ERROR: column "b" inherits from generated column but specifies identity
CREATE TABLE gtestxx_1 (a int NOT NULL, b int);
ALTER TABLE gtestxx_1 INHERIT gtest1; -- error
ERROR: column "b" in child table must be a generated column
-CREATE TABLE gtestxx_2 (a int NOT NULL, b int GENERATED ALWAYS AS (a * 22) STORED);
-ALTER TABLE gtestxx_2 INHERIT gtest1; -- error
-ERROR: column "b" in child table has a conflicting generation expression
CREATE TABLE gtestxx_3 (a int NOT NULL, b int GENERATED ALWAYS AS (a * 2) STORED);
ALTER TABLE gtestxx_3 INHERIT gtest1; -- ok
CREATE TABLE gtestxx_4 (b int GENERATED ALWAYS AS (a * 2) STORED, a int NOT NULL);
ALTER TABLE gtestxx_4 INHERIT gtest1; -- ok
-- test multiple inheritance mismatches
+CREATE TABLE gtesty (x int, b int DEFAULT 55);
+CREATE TABLE gtest1_y () INHERITS (gtest0, gtesty); -- error
+NOTICE: merging multiple inherited definitions of column "b"
+ERROR: inherited column "b" has a generation conflict
+DROP TABLE gtesty;
CREATE TABLE gtesty (x int, b int);
-CREATE TABLE gtest1_2 () INHERITS (gtest1, gtesty); -- error
+CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty); -- error
NOTICE: merging multiple inherited definitions of column "b"
ERROR: inherited column "b" has a generation conflict
DROP TABLE gtesty;
CREATE TABLE gtesty (x int, b int GENERATED ALWAYS AS (x * 22) STORED);
-CREATE TABLE gtest1_2 () INHERITS (gtest1, gtesty); -- error
+CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty); -- error
NOTICE: merging multiple inherited definitions of column "b"
ERROR: column "b" inherits conflicting generation expressions
-DROP TABLE gtesty;
-CREATE TABLE gtesty (x int, b int DEFAULT 55);
-CREATE TABLE gtest1_2 () INHERITS (gtest0, gtesty); -- error
+HINT: To resolve the conflict, specify a generation expression explicitly.
+CREATE TABLE gtest1_y (b int GENERATED ALWAYS AS (x + 1) STORED) INHERITS (gtest1, gtesty); -- ok
NOTICE: merging multiple inherited definitions of column "b"
-ERROR: inherited column "b" has a generation conflict
-DROP TABLE gtesty;
+NOTICE: moving and merging column "b" with inherited definition
+DETAIL: User-specified column moved to the position of the inherited column.
+\d gtest1_y
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ Table "public.gtest1_y"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+------------------------------------
+ a | integer | | not null |
+ b | integer | | | generated always as (x + 1) stored
+ x | integer | | |
+Inherits: gtest1,
+ gtesty
+
+-- test correct handling of GENERATED column that's only in child
+CREATE TABLE gtestp (f1 int);
+CREATE TABLE gtestc (f2 int GENERATED ALWAYS AS (f1+1) STORED) INHERITS(gtestp);
+INSERT INTO gtestc values(42);
+TABLE gtestc;
+ f1 | f2
+----+----
+ 42 | 43
+(1 row)
+
+UPDATE gtestp SET f1 = f1 * 10;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Inherited tables
+ERROR: can't split update for inherit table:
+TABLE gtestc;
+ f1 | f2
+----+----
+ 42 | 43
+(1 row)
+
+DROP TABLE gtestp CASCADE;
+NOTICE: drop cascades to table gtestc
-- test stored update
CREATE TABLE gtest3 (a int, b int GENERATED ALWAYS AS (a * 3) STORED);
INSERT INTO gtest3 (a) VALUES (1), (2), (3), (NULL);
@@ -562,7 +619,12 @@ SELECT * FROM gtest_tableoid;
-- drop column behavior
CREATE TABLE gtest10 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (b * 2) STORED);
-ALTER TABLE gtest10 DROP COLUMN b;
+ALTER TABLE gtest10 DROP COLUMN b; -- fails
+ERROR: cannot drop column b of table gtest10 because other objects depend on it
+DETAIL: column c of table gtest10 depends on column b of table gtest10
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+ALTER TABLE gtest10 DROP COLUMN b CASCADE; -- drops c too
+NOTICE: drop cascades to column c of table gtest10
\d gtest10
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
@@ -625,6 +687,10 @@ SELECT a, c FROM gtest12s; -- allowed
(2 rows)
RESET ROLE;
+DROP FUNCTION gf1(int); -- fail
+ERROR: cannot drop function gf1(integer) because other objects depend on it
+DETAIL: column c of table gtest12s depends on function gf1(integer)
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
DROP TABLE gtest11s, gtest12s;
DROP FUNCTION gf1(int);
DROP USER regress_user11;
@@ -822,21 +888,142 @@ CREATE TABLE gtest24 (a int PRIMARY KEY, b gtestdomain1 GENERATED ALWAYS AS (a *
INSERT INTO gtest24 (a) VALUES (4); -- ok
INSERT INTO gtest24 (a) VALUES (6); -- error
ERROR: value for domain gtestdomain1 violates check constraint "gtestdomain1_check"
+CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL);
+CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) STORED);
+INSERT INTO gtest24nn (a) VALUES (4); -- ok
+INSERT INTO gtest24nn (a) VALUES (NULL); -- error
+ERROR: value for domain gtestdomainnn violates check constraint "gtestdomainnn_check"
-- typed tables (currently not supported)
CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint);
CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) STORED);
ERROR: generated columns are not supported on typed tables
DROP TYPE gtest_type CASCADE;
--- table partitions (currently not supported)
-CREATE TABLE gtest_parent (f1 date NOT NULL, f2 text, f3 bigint) PARTITION BY RANGE (f1);
+-- partitioning cases
+CREATE TABLE gtest_parent (f1 date NOT NULL, f2 bigint, f3 bigint) PARTITION BY RANGE (f1);
CREATE TABLE gtest_child PARTITION OF gtest_parent (
f3 WITH OPTIONS GENERATED ALWAYS AS (f2 * 2) STORED
) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -- error
-ERROR: generated columns are not supported on partitions
-DROP TABLE gtest_parent;
--- partitioned table
+ERROR: child column "f3" specifies generation expression
+HINT: A child table column cannot be generated unless its parent column is.
+CREATE TABLE gtest_child (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED);
+ALTER TABLE gtest_parent ATTACH PARTITION gtest_child FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -- error
+ERROR: column "f3" in child table must not be a generated column
+DROP TABLE gtest_parent, gtest_child;
CREATE TABLE gtest_parent (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f1);
-CREATE TABLE gtest_child PARTITION OF gtest_parent FOR VALUES FROM ('2016-07-01') TO ('2016-08-01');
+CREATE TABLE gtest_child PARTITION OF gtest_parent
+ FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -- inherits gen expr
+CREATE TABLE gtest_child2 PARTITION OF gtest_parent (
+ f3 WITH OPTIONS GENERATED ALWAYS AS (f2 * 22) STORED -- overrides gen expr
+) FOR VALUES FROM ('2016-08-01') TO ('2016-09-01');
+CREATE TABLE gtest_child3 PARTITION OF gtest_parent (
+ f3 DEFAULT 42 -- error
+) FOR VALUES FROM ('2016-09-01') TO ('2016-10-01');
+ERROR: column "f3" inherits from generated column but specifies default
+CREATE TABLE gtest_child3 PARTITION OF gtest_parent (
+ f3 WITH OPTIONS GENERATED ALWAYS AS IDENTITY -- error
+) FOR VALUES FROM ('2016-09-01') TO ('2016-10-01');
+ERROR: identity columns are not supported on partitions
+CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 bigint);
+ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); -- error
+ERROR: column "f3" in child table must be a generated column
+DROP TABLE gtest_child3;
+CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 bigint DEFAULT 42);
+ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); -- error
+ERROR: column "f3" in child table must be a generated column
+DROP TABLE gtest_child3;
+CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS IDENTITY);
+ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); -- error
+ERROR: column "f3" in child table must be a generated column
+DROP TABLE gtest_child3;
+CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 33) STORED);
+ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01');
+\d gtest_child
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ Table "public.gtest_child"
+ Column | Type | Collation | Nullable | Default
+--------+--------+-----------+----------+-------------------------------------
+ f1 | date | | not null |
+ f2 | bigint | | |
+ f3 | bigint | | | generated always as (f2 * 2) stored
+Partition of: gtest_parent FOR VALUES FROM ('07-01-2016') TO ('08-01-2016')
+
+\d gtest_child2
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ Table "public.gtest_child2"
+ Column | Type | Collation | Nullable | Default
+--------+--------+-----------+----------+--------------------------------------
+ f1 | date | | not null |
+ f2 | bigint | | |
+ f3 | bigint | | | generated always as (f2 * 22) stored
+Partition of: gtest_parent FOR VALUES FROM ('08-01-2016') TO ('09-01-2016')
+
+\d gtest_child3
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ Table "public.gtest_child3"
+ Column | Type | Collation | Nullable | Default
+--------+--------+-----------+----------+--------------------------------------
+ f1 | date | | not null |
+ f2 | bigint | | |
+ f3 | bigint | | | generated always as (f2 * 33) stored
+Partition of: gtest_parent FOR VALUES FROM ('09-01-2016') TO ('10-01-2016')
+
INSERT INTO gtest_parent (f1, f2) VALUES ('2016-07-15', 1);
SELECT * FROM gtest_parent;
f1 | f2 | f3
@@ -850,14 +1037,29 @@ SELECT * FROM gtest_child;
07-15-2016 | 1 | 2
(1 row)
-DROP TABLE gtest_parent;
+UPDATE gtest_parent SET f1 = f1 + 60;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: DML(update) on partitioned tables
+SELECT * FROM gtest_parent;
+ f1 | f2 | f3
+------------+----+----
+ 09-13-2016 | 1 | 33
+(1 row)
+
+SELECT * FROM gtest_child3;
+ f1 | f2 | f3
+------------+----+----
+ 09-13-2016 | 1 | 33
+(1 row)
+
+-- we leave these tables around for purposes of testing dump/reload/upgrade
-- generated columns in partition key (not allowed)
-CREATE TABLE gtest_parent (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f3);
+CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f3);
ERROR: cannot use generated column in partition key
LINE 1: ...ENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f3);
^
DETAIL: Column "f3" is a generated column.
-CREATE TABLE gtest_parent (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((f3 * 3));
+CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((f3 * 3));
ERROR: cannot use generated column in partition key
LINE 1: ...ED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((f3 * 3));
^
@@ -971,7 +1173,8 @@ SELECT * FROM gtest27;
(2 rows)
ALTER TABLE gtest27 ALTER COLUMN x TYPE boolean USING x <> 0; -- error
-ERROR: generation expression for column "x" cannot be cast automatically to type boolean
+ERROR: cannot specify USING when altering type of generated column
+DETAIL: Column "x" is a generated column.
ALTER TABLE gtest27 ALTER COLUMN x DROP DEFAULT; -- error
ERROR: column "x" of relation "gtest27" is a generated column
HINT: Use ALTER TABLE ... ALTER COLUMN ... DROP EXPRESSION instead.
@@ -1280,7 +1483,7 @@ CREATE TRIGGER gtest2a BEFORE INSERT OR UPDATE ON gtest26
WHEN (NEW.b < 0) -- error
EXECUTE PROCEDURE gtest_trigger_func();
ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns
-LINE 3: WHEN (NEW.b < 0)
+LINE 3: WHEN (NEW.b < 0) -- error
^
DETAIL: Column "b" is a generated column.
CREATE TRIGGER gtest2b BEFORE INSERT OR UPDATE ON gtest26
@@ -1288,7 +1491,7 @@ CREATE TRIGGER gtest2b BEFORE INSERT OR UPDATE ON gtest26
WHEN (NEW.* IS NOT NULL) -- error
EXECUTE PROCEDURE gtest_trigger_func();
ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns
-LINE 3: WHEN (NEW.* IS NOT NULL)
+LINE 3: WHEN (NEW.* IS NOT NULL) -- error
^
DETAIL: A whole-row reference is used and the table contains generated columns.
CREATE TRIGGER gtest2 BEFORE INSERT ON gtest26
diff --git a/src/test/regress/expected/geometry.out b/src/test/regress/expected/geometry.out
index 403d0d80e2b..3922102c47c 100644
--- a/src/test/regress/expected/geometry.out
+++ b/src/test/regress/expected/geometry.out
@@ -5253,16 +5253,18 @@ SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
EXPLAIN (COSTS OFF)
SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
ORDER BY area(f1);
- QUERY PLAN
-----------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- Merge Key: (area(f1))
- -> Sort
- Sort Key: (area(f1))
- -> Seq Scan on circle_tbl
- Filter: (f1 && '<(1,-2),1>'::circle)
+ QUERY PLAN
+--------------------------------------------------------------
+ Result
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: (area(f1))
+ -> Sort
+ Sort Key: (area(f1))
+ -> Index Scan using gcircleind on circle_tbl
+ Index Cond: (f1 && '<(1,-2),1>'::circle)
+ Filter: (f1 && '<(1,-2),1>'::circle)
Optimizer: Postgres query optimizer
-(7 rows)
+(9 rows)
SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
ORDER BY area(f1);
@@ -5286,16 +5288,18 @@ SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
EXPLAIN (COSTS OFF)
SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
ORDER BY (poly_center(f1))[0];
- QUERY PLAN
---------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- Merge Key: ((poly_center(f1))[0])
- -> Sort
- Sort Key: ((poly_center(f1))[0])
- -> Seq Scan on polygon_tbl
- Filter: (f1 @> '((1,1),(2,2),(2,1))'::polygon)
+ QUERY PLAN
+---------------------------------------------------------------------
+ Result
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: ((poly_center(f1))[0])
+ -> Sort
+ Sort Key: ((poly_center(f1))[0])
+ -> Index Scan using gpolygonind on polygon_tbl
+ Index Cond: (f1 @> '((1,1),(2,2),(2,1))'::polygon)
+ Filter: (f1 @> '((1,1),(2,2),(2,1))'::polygon)
Optimizer: Postgres query optimizer
-(7 rows)
+(9 rows)
SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
ORDER BY (poly_center(f1))[0];
diff --git a/src/test/regress/expected/gin_optimizer.out b/src/test/regress/expected/gin_optimizer.out
index ef5c879df76..59f47d7e244 100644
--- a/src/test/regress/expected/gin_optimizer.out
+++ b/src/test/regress/expected/gin_optimizer.out
@@ -179,6 +179,7 @@ begin
end;
$$;
-- check number of rows returned by index and removed by recheck
+-- start_ignore
select
query,
js->0->'Plan'->'Plans'->0->'Actual Rows' as "return by index",
@@ -200,20 +201,21 @@ from
lateral explain_query_json($$select * from t_gin_test_tbl where $$ || query) js,
lateral execute_text_query_index($$select string_agg((i, j)::text, ' ') from ( select * from t_gin_test_tbl where $$ || query || $$ order by i ) a$$ ) res_index,
lateral execute_text_query_heap($$select string_agg((i, j)::text, ' ') from ( select * from t_gin_test_tbl where $$ || query || $$ order by i ) a $$ ) res_heap;
- query | return by index | removed by recheck | match
+ query | return by index | removed by recheck | match
-------------------------------------------+-----------------+--------------------+-------
- i @> '{}' | 4 | 0 | t
- j @> '{}' | 5 | | t
+ i @> '{}' | 4 | 0 | f
+ j @> '{}' | 5 | | f
i @> '{}' and j @> '{}' | 3 | | t
- i @> '{1}' | 2 | 0 | t
+ i @> '{1}' | 2 | 0 | f
i @> '{1}' and j @> '{}' | 2 | | t
i @> '{1}' and i @> '{}' and j @> '{}' | 2 | | t
j @> '{10}' | 3 | | t
j @> '{10}' and i @> '{}' | 2 | | t
j @> '{10}' and j @> '{}' and i @> '{}' | 2 | | t
- i @> '{1}' and j @> '{10}' | 1 | | t
+ i @> '{1}' and j @> '{10}' | 1 | | f
(10 rows)
+-- end_ignore
reset enable_seqscan;
reset enable_bitmapscan;
-- re-purpose t_gin_test_tbl to test scans involving posting trees
@@ -308,3 +310,12 @@ select count(*) from t_gin_test_tbl where j @> '{}'::int[];
reset enable_seqscan;
reset enable_bitmapscan;
drop table t_gin_test_tbl;
+-- test an unlogged table, mostly to get coverage of ginbuildempty
+create unlogged table t_gin_test_tbl(i int4[], j int4[]);
+create index on t_gin_test_tbl using gin (i, j);
+insert into t_gin_test_tbl
+values
+ (null, null),
+ ('{}', null),
+ ('{1}', '{2,3}');
+drop table t_gin_test_tbl;
diff --git a/src/test/regress/expected/gist_optimizer.out b/src/test/regress/expected/gist_optimizer.out
index e9020c5db70..5062ac383c5 100644
--- a/src/test/regress/expected/gist_optimizer.out
+++ b/src/test/regress/expected/gist_optimizer.out
@@ -486,8 +486,17 @@ select p from gist_tbl order by circle(p,1) <-> point(0,0), p <-> point(0,0) lim
(0.7,0.7)
(15 rows)
+-- Force an index build using buffering.
+create index gist_tbl_box_index_forcing_buffering on gist_tbl using gist (p)
+ with (buffering=on, fillfactor=50);
-- Clean up
reset enable_seqscan;
reset enable_bitmapscan;
reset enable_indexonlyscan;
drop table gist_tbl;
+-- test an unlogged table, mostly to get coverage of gistbuildempty
+create unlogged table gist_tbl (b box);
+create index gist_tbl_box_index on gist_tbl using gist (b);
+insert into gist_tbl
+ select box(point(0.05*i, 0.05*i)) from generate_series(0,10) as i;
+drop table gist_tbl;
diff --git a/src/test/regress/expected/gp_array_agg_optimizer.out b/src/test/regress/expected/gp_array_agg_optimizer.out
index 94af2d0d82f..c3c169492ad 100644
--- a/src/test/regress/expected/gp_array_agg_optimizer.out
+++ b/src/test/regress/expected/gp_array_agg_optimizer.out
@@ -221,16 +221,18 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
-> GroupAggregate
Group Key: pagg_test.y
-> Sort
- Sort Key: pagg_test.y
+ Sort Key: pagg_test.y, (((unnest(regexp_split_to_array((string_agg((pagg_test.x)::text, ','::text)), ','::text))))::integer)
-> Result
-> ProjectSet
- -> HashAggregate
+ -> Finalize HashAggregate
Group Key: pagg_test.y
-> Redistribute Motion 3:3 (slice2; segments: 3)
Hash Key: pagg_test.y
- -> Seq Scan on pagg_test
+ -> Partial HashAggregate
+ Group Key: pagg_test.y
+ -> Seq Scan on pagg_test
Optimizer: Postgres query optimizer
-(14 rows)
+(16 rows)
-- Test array_agg(anyarray)
create table int_array_table (a int, arr int[]);
diff --git a/src/test/regress/expected/gp_explain_optimizer.out b/src/test/regress/expected/gp_explain_optimizer.out
index 0b476d484cd..251ef9401fd 100644
--- a/src/test/regress/expected/gp_explain_optimizer.out
+++ b/src/test/regress/expected/gp_explain_optimizer.out
@@ -567,7 +567,7 @@ EXPLAIN (analyze, costs off, timing off, summary off) UPDATE explain_subplan.ra
Update on part_a_1_a_10 range_parted_1
Update on part_a_10_a_20 range_parted_2
-> Explicit Redistribute Motion 1:3 (slice1; segments: 1) (actual rows=1 loops=1)
- -> Split (actual rows=2 loops=1)
+ -> Split Update (actual rows=2 loops=1)
-> Append (actual rows=1 loops=1)
-> Seq Scan on part_a_1_a_10 range_parted_1 (actual rows=0 loops=1)
Filter: ((a = 'a'::text) AND (c = '200'::numeric))
diff --git a/src/test/regress/expected/gp_gin_index_optimizer.out b/src/test/regress/expected/gp_gin_index_optimizer.out
index 86a414eabd3..ad1ad97539f 100644
--- a/src/test/regress/expected/gp_gin_index_optimizer.out
+++ b/src/test/regress/expected/gp_gin_index_optimizer.out
@@ -3,95 +3,88 @@ SET optimizer_enable_tablescan = off;
SET enable_seqscan = off;
set enable_bitmapscan = on;
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"wait": null}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"wait": null}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"wait": null}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"wait": null}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"wait": "CC"}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"wait": "CC"}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"wait": "CC"}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"wait": "CC"}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"wait": "CC", "public": true}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"wait": "CC", "public": true}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"wait": "CC", "public": true}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"wait": "CC", "public": true}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"age": 25}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"age": 25}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"age": 25}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"age": 25}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"age": 25.0}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"age": 25.0}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"age": 25.0}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"age": 25.0}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"array":["foo"]}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"array": ["foo"]}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"array": ["foo"]}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"array": ["foo"]}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"array": ["foo"]}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"array": ["bar"]}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"array": ["bar"]}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"array": ["bar"]}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"array": ["bar"]}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
count
@@ -137,69 +130,64 @@ SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}';
-- exercise GIN_SEARCH_MODE_ALL
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j ? 'public';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j ? 'public'::text)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j ? 'public'::text)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j ? 'public'::text)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j ? 'public'::text)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j ? 'bar';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j ? 'bar'::text)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j ? 'bar'::text)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j ? 'bar'::text)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j ? 'bar'::text)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled'];
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j ?| '{public,disabled}'::text[])
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j ?| '{public,disabled}'::text[])
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j ?| '{public,disabled}'::text[])
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j ?| '{public,disabled}'::text[])
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled'];
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j ?& '{public,disabled}'::text[])
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j ?& '{public,disabled}'::text[])
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j ?& '{public,disabled}'::text[])
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j ?& '{public,disabled}'::text[])
+ Optimizer: GPORCA
+(7 rows)
SELECT count(*) FROM testjsonb WHERE j @> '{}';
count
@@ -235,14 +223,14 @@ SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled'];
CREATE INDEX jidx_array ON testjsonb USING gin((j->'array'));
-- gin index on expression not support for orca
EXPLAIN SELECT count(*) from testjsonb WHERE j->'array' ? 'bar';
- QUERY PLAN
----------------------------------------------------------------------------------------
- Finalize Aggregate (cost=15.19..14.20 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=14.14..14.19 rows=3 width=8)
- -> Partial Aggregate (cost=14.14..14.15 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=12.08..14.13 rows=3 width=0)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Finalize Aggregate (cost=45.73..45.74 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=45.68..45.73 rows=3 width=8)
+ -> Partial Aggregate (cost=45.68..45.69 rows=1 width=8)
+ -> Bitmap Heap Scan on testjsonb (cost=8.60..45.24 rows=176 width=0)
Recheck Cond: ((j -> 'array'::text) ? 'bar'::text)
- -> Bitmap Index Scan on jidx_array (cost=0.00..12.08 rows=3 width=0)
+ -> Bitmap Index Scan on jidx_array (cost=0.00..8.55 rows=176 width=0)
Index Cond: ((j -> 'array'::text) ? 'bar'::text)
Optimizer: Postgres query optimizer
(8 rows)
@@ -300,69 +288,64 @@ DROP INDEX jidx_array;
DROP INDEX jidx;
CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops);
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"wait": null}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"wait": null}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"wait": null}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"wait": null}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"wait": "CC"}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"wait": "CC"}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"wait": "CC"}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"wait": "CC"}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"wait": "CC", "public": true}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"wait": "CC", "public": true}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"wait": "CC"}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"wait": "CC"}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"age": 25}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"age": 25}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"age": 25}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"age": 25}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{"age": 25.0}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{"age": 25.0}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{"age": 25.0}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{"age": 25.0}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
count
@@ -396,17 +379,16 @@ SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
-- exercise GIN_SEARCH_MODE_ALL
EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{}';
- QUERY PLAN
------------------------------------------------------------------------------------------
- Finalize Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..33665.68 rows=1 width=8)
- -> Partial Aggregate (cost=0.00..33665.68 rows=1 width=8)
- -> Bitmap Heap Scan on testjsonb (cost=0.00..33665.68 rows=135 width=1)
- Recheck Cond: (j @> '{}'::jsonb)
- -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
- Index Cond: (j @> '{}'::jsonb)
- Optimizer: Pivotal Optimizer (GPORCA) version 3.58.1
-(8 rows)
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate (cost=0.00..391.30 rows=1 width=8)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..391.30 rows=1 width=1)
+ -> Bitmap Heap Scan on testjsonb (cost=0.00..391.30 rows=1 width=1)
+ Recheck Cond: (j @> '{}'::jsonb)
+ -> Bitmap Index Scan on jidx (cost=0.00..0.00 rows=0 width=0)
+ Index Cond: (j @> '{}'::jsonb)
+ Optimizer: GPORCA
+(7 rows)
SELECT count(*) FROM testjsonb WHERE j @> '{}';
count
@@ -660,7 +642,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)';
SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*';
count
-------
- 494
+ 496
(1 row)
-- For orca, ScalarArrayOpExpr condition on index scan not supported
@@ -679,7 +661,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme';
SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme';
count
-------
- 508
+ 1022
(1 row)
DROP INDEX wowidx;
diff --git a/src/test/regress/expected/gpdist_legacy_opclasses_optimizer.out b/src/test/regress/expected/gpdist_legacy_opclasses_optimizer.out
index 17ac0786a8f..9e3b969a024 100644
--- a/src/test/regress/expected/gpdist_legacy_opclasses_optimizer.out
+++ b/src/test/regress/expected/gpdist_legacy_opclasses_optimizer.out
@@ -296,8 +296,8 @@ explain (costs off) select * from modern_int a inner join legacy_domain_over_int
(9 rows)
create type colors as enum ('red', 'green', 'blue');
-create table legacy_enum(color colors) distributed by(color cdbhash_enum_ops);
-insert into legacy_enum values ('red'), ('green'), ('blue');
+create table legacy_enum(col1 int, color colors) distributed by(col1);
+insert into legacy_enum values (1, 'red'), (2, 'green'), (3, 'blue');
explain (costs off) select * from legacy_enum a inner join legacy_enum b on a.color = b.color;
QUERY PLAN
--------------------------------------------------------------
@@ -306,16 +306,17 @@ explain (costs off) select * from legacy_enum a inner join legacy_enum b on a.co
Hash Cond: ((a.color)::anyenum = (b.color)::anyenum)
-> Seq Scan on legacy_enum a
-> Hash
- -> Seq Scan on legacy_enum b
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on legacy_enum b
Optimizer: GPORCA
-(7 rows)
+(8 rows)
select * from legacy_enum a inner join legacy_enum b on a.color = b.color;
- color | color
--------+-------
- blue | blue
- red | red
- green | green
+ col1 | color | col1 | color
+------+-------+------+-------
+ 1 | red | 1 | red
+ 2 | green | 2 | green
+ 3 | blue | 3 | blue
(3 rows)
--
diff --git a/src/test/regress/expected/groupingsets_optimizer.out b/src/test/regress/expected/groupingsets_optimizer.out
index d59a83ae9cf..ba6c67a703c 100644
--- a/src/test/regress/expected/groupingsets_optimizer.out
+++ b/src/test/regress/expected/groupingsets_optimizer.out
@@ -522,7 +522,7 @@ select * from (
group by grouping sets(1, 2)
) ss
where x = 1 and q1 = 123;
- QUERY PLAN
+ QUERY PLAN
------------------------------------------------------
Result
Output: NULL::integer, NULL::bigint, NULL::numeric
@@ -662,16 +662,16 @@ CREATE VIEW gstest_view AS select a, b, grouping(a,b), sum(c), count(*), max(c)
from gstest2 group by rollup ((a,b,c),(c,d));
NOTICE: view "gstest_view" will be a temporary view
select pg_get_viewdef('gstest_view'::regclass, true);
- pg_get_viewdef
--------------------------------------------------------------------------------
- SELECT gstest2.a, +
- gstest2.b, +
- GROUPING(gstest2.a, gstest2.b) AS "grouping", +
- sum(gstest2.c) AS sum, +
- count(*) AS count, +
- max(gstest2.c) AS max +
- FROM gstest2 +
- GROUP BY ROLLUP((gstest2.a, gstest2.b, gstest2.c), (gstest2.c, gstest2.d));
+ pg_get_viewdef
+---------------------------------------
+ SELECT a, +
+ b, +
+ GROUPING(a, b) AS "grouping", +
+ sum(c) AS sum, +
+ count(*) AS count, +
+ max(c) AS max +
+ FROM gstest2 +
+ GROUP BY ROLLUP((a, b, c), (c, d));
(1 row)
-- Nested queries with 3 or more levels of nesting
@@ -1925,6 +1925,7 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou
-- test the knapsack
set enable_indexscan = false;
+set hash_mem_multiplier = 1.0;
set work_mem = '64kB';
explain (costs off)
select unique1,
@@ -2418,6 +2419,7 @@ group by cube (g1000,g100,g10) distributed by (g1000);
set jit_above_cost to default;
set enable_sort = true;
set work_mem to default;
+set hash_mem_multiplier to default;
-- Compare results of ORCA plan that relies on "IS NOT DISTINCT FROM" HASH Join
(select * from gs_hash_1 except select * from gs_group_1)
union all
@@ -2568,7 +2570,6 @@ select (select grouping(v1)) from (values ((select 1))) v(v1) group by v1;
QUERY PLAN
---------------------------
GroupAggregate
- Group Key: $2
InitPlan 1 (returns $1)
-> Result
InitPlan 3 (returns $2)
@@ -2576,7 +2577,7 @@ select (select grouping(v1)) from (values ((select 1))) v(v1) group by v1;
-> Result
SubPlan 2
-> Result
-(9 rows)
+(8 rows)
select (select grouping(v1)) from (values ((select 1))) v(v1) group by v1;
grouping
diff --git a/src/test/regress/expected/horology.out b/src/test/regress/expected/horology.out
index f1d66186ee9..d475adf42c9 100644
--- a/src/test/regress/expected/horology.out
+++ b/src/test/regress/expected/horology.out
@@ -2436,13 +2436,12 @@ select count(*) from date_tbl
where f1 between '1997-01-01' and '1998-01-01';
QUERY PLAN
-----------------------------------------------------------------------------------------
- Finalize Aggregate
+ Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
- -> Partial Aggregate
- -> Seq Scan on date_tbl
- Filter: ((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date))
+ -> Seq Scan on date_tbl
+ Filter: ((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date))
Optimizer: Postgres query optimizer
-(6 rows)
+(5 rows)
select count(*) from date_tbl
where f1 between '1997-01-01' and '1998-01-01';
@@ -2456,13 +2455,12 @@ select count(*) from date_tbl
where f1 not between '1997-01-01' and '1998-01-01';
QUERY PLAN
--------------------------------------------------------------------------------------
- Finalize Aggregate
+ Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
- -> Partial Aggregate
- -> Seq Scan on date_tbl
- Filter: ((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date))
+ -> Seq Scan on date_tbl
+ Filter: ((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date))
Optimizer: Postgres query optimizer
-(6 rows)
+(5 rows)
select count(*) from date_tbl
where f1 not between '1997-01-01' and '1998-01-01';
@@ -2476,13 +2474,12 @@ select count(*) from date_tbl
where f1 between symmetric '1997-01-01' and '1998-01-01';
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------
- Finalize Aggregate
+ Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
- -> Partial Aggregate
- -> Seq Scan on date_tbl
- Filter: (((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date)) OR ((f1 >= '01-01-1998'::date) AND (f1 <= '01-01-1997'::date)))
+ -> Seq Scan on date_tbl
+ Filter: (((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date)) OR ((f1 >= '01-01-1998'::date) AND (f1 <= '01-01-1997'::date)))
Optimizer: Postgres query optimizer
-(6 rows)
+(5 rows)
select count(*) from date_tbl
where f1 between symmetric '1997-01-01' and '1998-01-01';
@@ -2496,13 +2493,12 @@ select count(*) from date_tbl
where f1 not between symmetric '1997-01-01' and '1998-01-01';
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------
- Finalize Aggregate
+ Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
- -> Partial Aggregate
- -> Seq Scan on date_tbl
- Filter: (((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date)) AND ((f1 < '01-01-1998'::date) OR (f1 > '01-01-1997'::date)))
+ -> Seq Scan on date_tbl
+ Filter: (((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date)) AND ((f1 < '01-01-1998'::date) OR (f1 > '01-01-1997'::date)))
Optimizer: Postgres query optimizer
-(6 rows)
+(5 rows)
select count(*) from date_tbl
where f1 not between symmetric '1997-01-01' and '1998-01-01';
diff --git a/src/test/regress/expected/incremental_sort_optimizer.out b/src/test/regress/expected/incremental_sort_optimizer.out
index 97550c1beda..215486fe8f5 100644
--- a/src/test/regress/expected/incremental_sort_optimizer.out
+++ b/src/test/regress/expected/incremental_sort_optimizer.out
@@ -1658,17 +1658,3 @@ order by 1, 2;
Optimizer: Postgres query optimizer
(8 rows)
--- Disallow pushing down sort when pathkey is an SRF.
-explain (costs off) select unique1 from tenk1 order by unnest('{1,2}'::int[]);
- QUERY PLAN
------------------------------------------------------
- Result
- -> Gather Motion 3:1 (slice1; segments: 3)
- Merge Key: (unnest('{1,2}'::anyarray))
- -> Sort
- Sort Key: (unnest('{1,2}'::anyarray))
- -> ProjectSet
- -> Seq Scan on tenk1
- Optimizer: Pivotal Optimizer (GPORCA)
-(8 rows)
-
diff --git a/src/test/regress/expected/index_including.out b/src/test/regress/expected/index_including.out
index 12f9d863d01..e13e9cba304 100644
--- a/src/test/regress/expected/index_including.out
+++ b/src/test/regress/expected/index_including.out
@@ -423,7 +423,7 @@ EXPLAIN (COSTS OFF) SELECT c2, c1, c3 FROM nametbl WHERE c2 = 'two' AND c1 = 1;
QUERY PLAN
----------------------------------------------------------
Gather Motion 1:1 (slice1; segments: 1)
- -> Index Only Scan using nametbl_c1_c2_idx on nametbl
+ -> Index Scan using nametbl_c1_c2_idx on nametbl
Index Cond: ((c2 = 'two'::name) AND (c1 = 1))
Optimizer: Postgres query optimizer
(4 rows)
diff --git a/src/test/regress/expected/indexing.out b/src/test/regress/expected/indexing.out
index 9719ce12527..436446d65b7 100644
--- a/src/test/regress/expected/indexing.out
+++ b/src/test/regress/expected/indexing.out
@@ -1310,41 +1310,44 @@ create table idxpart1 partition of idxpart for values from (0) to (100000);
set enable_seqscan to off;
create index idxpart_brin on idxpart using brin(b);
explain (costs off) select * from idxpart where b = 'abcd';
- QUERY PLAN
--------------------------------------------------
+ QUERY PLAN
+-------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
- -> Bitmap Heap Scan on idxpart1 idxpart
+ -> Dynamic Bitmap Heap Scan on idxpart
+ Number of partitions to scan: 1 (out of 1)
Recheck Cond: (b = 'abcd'::text)
- -> Bitmap Index Scan on idxpart1_b_idx
+ Filter: (b = 'abcd'::text)
+ -> Dynamic Bitmap Index Scan on idxpart_brin
Index Cond: (b = 'abcd'::text)
- Optimizer: Postgres query optimizer
-(6 rows)
+ Optimizer: GPORCA
+(8 rows)
drop index idxpart_brin;
create index idxpart_spgist on idxpart using spgist(b);
explain (costs off) select * from idxpart where b = 'abcd';
- QUERY PLAN
--------------------------------------------------
+ QUERY PLAN
+----------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
- -> Bitmap Heap Scan on idxpart1 idxpart
- Recheck Cond: (b = 'abcd'::text)
- -> Bitmap Index Scan on idxpart1_b_idx
- Index Cond: (b = 'abcd'::text)
- Optimizer: Postgres query optimizer
-(6 rows)
+ -> Dynamic Seq Scan on idxpart
+ Number of partitions to scan: 1 (out of 1)
+ Filter: (b = 'abcd'::text)
+ Optimizer: GPORCA
+(5 rows)
drop index idxpart_spgist;
create index idxpart_gin on idxpart using gin(c);
explain (costs off) select * from idxpart where c @> array[42];
- QUERY PLAN
-----------------------------------------------------
+ QUERY PLAN
+------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
- -> Bitmap Heap Scan on idxpart1 idxpart
+ -> Dynamic Bitmap Heap Scan on idxpart
+ Number of partitions to scan: 1 (out of 1)
Recheck Cond: (c @> '{42}'::integer[])
- -> Bitmap Index Scan on idxpart1_c_idx
+ Filter: (c @> '{42}'::integer[])
+ -> Dynamic Bitmap Index Scan on idxpart_gin
Index Cond: (c @> '{42}'::integer[])
- Optimizer: Postgres query optimizer
-(6 rows)
+ Optimizer: GPORCA
+(8 rows)
drop index idxpart_gin;
reset enable_seqscan;
diff --git a/src/test/regress/expected/inet_optimizer.out b/src/test/regress/expected/inet_optimizer.out
index d146ca39005..db5cf7f88b2 100644
--- a/src/test/regress/expected/inet_optimizer.out
+++ b/src/test/regress/expected/inet_optimizer.out
@@ -1068,3 +1068,40 @@ SELECT a FROM (VALUES
ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
(91 rows)
+-- test non-error-throwing API for some core types
+SELECT pg_input_is_valid('1234', 'cidr');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT * FROM pg_input_error_info('1234', 'cidr');
+ message | detail | hint | sql_error_code
+--------------------------------------------+--------+------+----------------
+ invalid input syntax for type cidr: "1234" | | | 22P02
+(1 row)
+
+SELECT pg_input_is_valid('192.168.198.200/24', 'cidr');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT * FROM pg_input_error_info('192.168.198.200/24', 'cidr');
+ message | detail | hint | sql_error_code
+------------------------------------------+--------------------------------------+------+----------------
+ invalid cidr value: "192.168.198.200/24" | Value has bits set to right of mask. | | 22P02
+(1 row)
+
+SELECT pg_input_is_valid('1234', 'inet');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT * FROM pg_input_error_info('1234', 'inet');
+ message | detail | hint | sql_error_code
+--------------------------------------------+--------+------+----------------
+ invalid input syntax for type inet: "1234" | | | 22P02
+(1 row)
+
diff --git a/src/test/regress/expected/inherit_optimizer.out b/src/test/regress/expected/inherit_optimizer.out
index ef4a44eac33..d30430b5519 100644
--- a/src/test/regress/expected/inherit_optimizer.out
+++ b/src/test/regress/expected/inherit_optimizer.out
@@ -539,6 +539,34 @@ CREATE TEMP TABLE z (b TEXT, PRIMARY KEY(aa, b)) inherits (a);
INSERT INTO z VALUES (NULL, 'text'); -- should fail
ERROR: null value in column "aa" of relation "z" violates not-null constraint
DETAIL: Failing row contains (null, text).
+-- Check inherited UPDATE with first child excluded
+create table some_tab (f1 int, f2 int, f3 int, check (f1 < 10) no inherit);
+create table some_tab_child () inherits(some_tab);
+insert into some_tab_child select i, i+1, 0 from generate_series(1,1000) i;
+create index on some_tab_child(f1, f2);
+-- while at it, also check that statement-level triggers fire
+create function some_tab_stmt_trig_func() returns trigger as
+$$begin raise notice 'updating some_tab'; return NULL; end;$$
+language plpgsql;
+create trigger some_tab_stmt_trig
+ before update on some_tab execute function some_tab_stmt_trig_func();
+ERROR: Triggers for statements are not yet supported
+explain (costs off)
+update some_tab set f3 = 11 where f1 = 12 and f2 = 13;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Update on some_tab
+ Update on some_tab_child some_tab_1
+ -> Result
+ -> Index Scan using some_tab_child_f1_f2_idx on some_tab_child some_tab_1
+ Index Cond: ((f1 = 12) AND (f2 = 13))
+ Optimizer: Postgres query optimizer
+(6 rows)
+
+update some_tab set f3 = 11 where f1 = 12 and f2 = 13;
+drop table some_tab cascade;
+NOTICE: drop cascades to table some_tab_child
+drop function some_tab_stmt_trig_func();
-- Check inherited UPDATE with all children excluded
create table some_tab (a int, b int) distributed randomly;
create table some_tab_child () inherits (some_tab);
@@ -1080,6 +1108,34 @@ Inherits: inht1,
Distributed by: (aa)
DROP TABLE inhts;
+-- Test for adding a column to a parent table with complex inheritance
+CREATE TABLE inhta ();
+CREATE TABLE inhtb () INHERITS (inhta);
+CREATE TABLE inhtc () INHERITS (inhtb);
+CREATE TABLE inhtd () INHERITS (inhta, inhtb, inhtc);
+ALTER TABLE inhta ADD COLUMN i int, ADD COLUMN j bigint DEFAULT 1;
+NOTICE: merging definition of column "i" for child "inhtd"
+NOTICE: merging definition of column "j" for child "inhtd"
+\d+ inhta
+ Table "public.inhta"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ i | integer | | | | plain | |
+ j | bigint | | | 1 | plain | |
+Child tables: inhtb,
+ inhtd
+
+\d+ inhtd
+ Table "public.inhtd"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ i | integer | | | | plain | |
+ j | bigint | | | 1 | plain | |
+Inherits: inhta,
+ inhtb,
+ inhtc
+
+DROP TABLE inhta, inhtb, inhtc, inhtd;
-- Test for renaming in diamond inheritance
CREATE TABLE inht2 (x int) INHERITS (inht1);
CREATE TABLE inht3 (y int) INHERITS (inht1);
@@ -1604,6 +1660,39 @@ select min(1-id) from matest0;
reset enable_seqscan;
reset enable_parallel_append;
reset enable_bitmapscan;
+explain (verbose, costs off) -- bug #18652
+select 1 - id as c from
+(select id from matest3 t1 union all select id * 2 from matest3 t2) ss
+order by c;
+ QUERY PLAN
+-------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Output: ((1 - t1.id))
+ Merge Key: ((1 - t1.id))
+ -> Sort
+ Output: ((1 - t1.id))
+ Sort Key: ((1 - t1.id))
+ -> Result
+ Output: (1 - t1.id)
+ -> Append
+ -> Seq Scan on public.matest3 t1
+ Output: t1.id
+ -> Seq Scan on public.matest3 t2
+ Output: (t2.id * 2)
+ Optimizer: GPORCA
+(14 rows)
+
+select 1 - id as c from
+(select id from matest3 t1 union all select id * 2 from matest3 t2) ss
+order by c;
+ c
+-----
+ -4
+ -5
+ -9
+ -11
+(4 rows)
+
drop table matest0 cascade;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table matest1
@@ -1813,6 +1902,117 @@ reset enable_indexscan;
reset enable_bitmapscan;
rollback;
--
+-- Check handling of MULTIEXPR SubPlans in inherited updates
+--
+create table inhpar(f1 int, f2 name);
+create table inhcld(f2 name, f1 int);
+alter table inhcld inherit inhpar;
+insert into inhpar select x, x::text from generate_series(1,5) x;
+insert into inhcld select x::text, x from generate_series(6,10) x;
+explain (verbose, costs off)
+update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1);
+ERROR: can't split update for inherit table:
+update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1);
+ERROR: can't split update for inherit table:
+select * from inhpar order by f1;
+ f1 | f2
+----+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+ 10 | 10
+(10 rows)
+
+drop table inhpar cascade;
+NOTICE: drop cascades to table inhcld
+--
+-- And the same for partitioned cases
+--
+create table inhpar(f1 int primary key, f2 name) partition by range (f1);
+create table inhcld1(f2 name, f1 int primary key);
+create table inhcld2(f1 int primary key, f2 name);
+alter table inhpar attach partition inhcld1 for values from (1) to (5);
+alter table inhpar attach partition inhcld2 for values from (5) to (100);
+insert into inhpar select x, x::text from generate_series(1,10) x;
+explain (verbose, costs off)
+update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1);
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------
+ Update on public.inhpar i
+ Update on public.inhcld1 i_1
+ Update on public.inhcld2 i_2
+ -> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
+ Output: ($2), (($3)::name), ((SubPlan 1 (returns $2,$3) (copy 2))), i.tableoid, i.ctid, i.gp_segment_id, (DMLAction)
+ -> Split Update
+ Output: ($2), (($3)::name), ((SubPlan 1 (returns $2,$3) (copy 2))), i.tableoid, i.ctid, i.gp_segment_id, DMLAction
+ -> Append
+ -> Seq Scan on public.inhcld1 i_1
+ Output: $2, $3, (SubPlan 1 (returns $2,$3) (copy 2)), i_1.tableoid, i_1.ctid, i_1.gp_segment_id
+ SubPlan 1 (returns $2,$3) (copy 2)
+ -> Limit
+ Output: (i_1.f1), (((i_1.f2)::text || '-'::text))
+ -> Result
+ Output: i_1.f1, ((i_1.f2)::text || '-'::text)
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on public.int4_tbl
+ -> Seq Scan on public.inhcld2 i_2
+ Output: $2, $3, (SubPlan 1 (returns $2,$3) (copy 3)), i_2.tableoid, i_2.ctid, i_2.gp_segment_id
+ SubPlan 1 (returns $2,$3) (copy 3)
+ -> Limit
+ Output: (i_2.f1), (((i_2.f2)::text || '-'::text))
+ -> Result
+ Output: i_2.f1, ((i_2.f2)::text || '-'::text)
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on public.int4_tbl int4_tbl_1
+ Settings: enable_mergejoin = 'on', enable_bitmapscan = 'off', enable_indexscan = 'on', enable_seqscan = 'off'
+ Optimizer: Postgres query optimizer
+(30 rows)
+
+update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1);
+select * from inhpar;
+ f1 | f2
+----+-----
+ 1 | 1-
+ 2 | 2-
+ 3 | 3-
+ 4 | 4-
+ 5 | 5-
+ 6 | 6-
+ 7 | 7-
+ 8 | 8-
+ 9 | 9-
+ 10 | 10-
+(10 rows)
+
+-- Also check ON CONFLICT
+insert into inhpar as i values (3), (7) on conflict (f1)
+ do update set (f1, f2) = (select i.f1, i.f2 || '+');
+ERROR: modification of distribution columns in OnConflictUpdate is not supported
+select * from inhpar order by f1; -- tuple order might be unstable here
+ f1 | f2
+----+-----
+ 1 | 1-
+ 2 | 2-
+ 3 | 3-
+ 4 | 4-
+ 5 | 5-
+ 6 | 6-
+ 7 | 7-
+ 8 | 8-
+ 9 | 9-
+ 10 | 10-
+(10 rows)
+
+drop table inhpar cascade;
+--
-- Check handling of a constant-null CHECK constraint
--
create table cnullparent (f1 int);
@@ -2331,6 +2531,8 @@ explain (costs off) select * from mcrparted where a < 20 order by a, abs(b), c;
Optimizer: Postgres query optimizer
(12 rows)
+set enable_bitmapscan to off;
+set enable_sort to off;
create table mclparted (a int) partition by list(a);
create table mclparted1 partition of mclparted for values in(1);
create table mclparted2 partition of mclparted for values in(2);
@@ -2364,7 +2566,110 @@ explain (costs off) select * from mclparted order by a;
Optimizer: Pivotal Optimizer (GPORCA)
(7 rows)
+explain (costs off) select * from mclparted where a in(3,4,5) order by a;
+ QUERY PLAN
+----------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: a
+ -> Sort
+ Sort Key: a
+ -> Dynamic Index Scan on mclparted_a_idx on mclparted
+ Index Cond: (a = ANY ('{3,4,5}'::integer[]))
+ Number of partitions to scan: 2 (out of 4)
+ Optimizer: GPORCA
+(8 rows)
+
+-- Introduce a NULL and DEFAULT partition so we can test more complex cases
+create table mclparted_null partition of mclparted for values in(null);
+create table mclparted_def partition of mclparted default;
+-- Append can be used providing we don't scan the interleaved partition
+explain (costs off) select * from mclparted where a in(1,2,4) order by a;
+ QUERY PLAN
+----------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: a
+ -> Sort
+ Sort Key: a
+ -> Dynamic Index Scan on mclparted_a_idx on mclparted
+ Index Cond: (a = ANY ('{1,2,4}'::integer[]))
+ Number of partitions to scan: 3 (out of 6)
+ Optimizer: GPORCA
+(8 rows)
+
+explain (costs off) select * from mclparted where a in(1,2,4) or a is null order by a;
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: a
+ -> Sort
+ Sort Key: a
+ -> Dynamic Seq Scan on mclparted
+ Number of partitions to scan: 4 (out of 6)
+ Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL))
+ Optimizer: GPORCA
+(8 rows)
+
+-- Test a more complex case where the NULL partition allows some other value
+drop table mclparted_null;
+create table mclparted_0_null partition of mclparted for values in(0,null);
+-- Ensure MergeAppend is used since 0 and NULLs are in the same partition.
+explain (costs off) select * from mclparted where a in(1,2,4) or a is null order by a;
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: a
+ -> Sort
+ Sort Key: a
+ -> Dynamic Seq Scan on mclparted
+ Number of partitions to scan: 4 (out of 6)
+ Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL))
+ Optimizer: GPORCA
+(8 rows)
+
+explain (costs off) select * from mclparted where a in(0,1,2,4) order by a;
+ QUERY PLAN
+----------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: a
+ -> Sort
+ Sort Key: a
+ -> Dynamic Index Scan on mclparted_a_idx on mclparted
+ Index Cond: (a = ANY ('{0,1,2,4}'::integer[]))
+ Number of partitions to scan: 4 (out of 6)
+ Optimizer: GPORCA
+(8 rows)
+
+-- Ensure Append is used when the null partition is pruned
+explain (costs off) select * from mclparted where a in(1,2,4) order by a;
+ QUERY PLAN
+----------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: a
+ -> Sort
+ Sort Key: a
+ -> Dynamic Index Scan on mclparted_a_idx on mclparted
+ Index Cond: (a = ANY ('{1,2,4}'::integer[]))
+ Number of partitions to scan: 3 (out of 6)
+ Optimizer: GPORCA
+(8 rows)
+
+-- Ensure MergeAppend is used when the default partition is not pruned
+explain (costs off) select * from mclparted where a in(1,2,4,100) order by a;
+ QUERY PLAN
+----------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: a
+ -> Sort
+ Sort Key: a
+ -> Dynamic Index Scan on mclparted_a_idx on mclparted
+ Index Cond: (a = ANY ('{1,2,4,100}'::integer[]))
+ Number of partitions to scan: 4 (out of 6)
+ Optimizer: GPORCA
+(8 rows)
+
drop table mclparted;
+reset enable_sort;
+reset enable_bitmapscan;
-- Ensure subplans which don't have a path with the correct pathkeys get
-- sorted correctly.
drop index mcrparted_a_abs_c_idx;
@@ -2536,7 +2841,7 @@ alter table permtest_child attach partition permtest_grandchild for values in ('
alter table permtest_parent attach partition permtest_child for values in (1);
create index on permtest_parent (left(c, 3));
insert into permtest_parent
- select 1, 'a', left(md5(i::text), 5) from generate_series(0, 100) i;
+ select 1, 'a', left(fipshash(i::text), 5) from generate_series(0, 100) i;
analyze permtest_parent;
create role regress_no_child_access;
revoke all on permtest_grandchild from regress_no_child_access;
diff --git a/src/test/regress/expected/interval_optimizer.out b/src/test/regress/expected/interval_optimizer.out
index 0edf2284c9f..682813f3b14 100755
--- a/src/test/regress/expected/interval_optimizer.out
+++ b/src/test/regress/expected/interval_optimizer.out
@@ -72,6 +72,37 @@ INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago');
ERROR: invalid input syntax for type interval: "@ 30 eons ago"
LINE 1: INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago');
^
+-- Test non-error-throwing API
+SELECT pg_input_is_valid('1.5 weeks', 'interval');
+ pg_input_is_valid
+-------------------
+ t
+(1 row)
+
+SELECT pg_input_is_valid('garbage', 'interval');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT pg_input_is_valid('@ 30 eons ago', 'interval');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT * FROM pg_input_error_info('garbage', 'interval');
+ message | detail | hint | sql_error_code
+---------------------------------------------------+--------+------+----------------
+ invalid input syntax for type interval: "garbage" | | | 22007
+(1 row)
+
+SELECT * FROM pg_input_error_info('@ 30 eons ago', 'interval');
+ message | detail | hint | sql_error_code
+---------------------------------------------------------+--------+------+----------------
+ invalid input syntax for type interval: "@ 30 eons ago" | | | 22007
+(1 row)
+
-- test interval operators
SELECT * FROM INTERVAL_TBL;
f1
@@ -362,6 +393,19 @@ SELECT * FROM INTERVAL_TBL;
@ 6 years
(10 rows)
+-- multiplication and division overflow test cases
+SELECT '3000000 months'::interval * 1000;
+ERROR: interval out of range
+SELECT '3000000 months'::interval / 0.001;
+ERROR: interval out of range
+SELECT '3000000 days'::interval * 1000;
+ERROR: interval out of range
+SELECT '3000000 days'::interval / 0.001;
+ERROR: interval out of range
+SELECT '1 month 2146410 days'::interval * 1000.5002;
+ERROR: interval out of range
+SELECT '4611686018427387904 usec'::interval / 0.1;
+ERROR: interval out of range
-- test avg(interval), which is somewhat fragile since people have been
-- known to change the allowed input syntax for type interval without
-- updating pg_aggregate.agginitval
@@ -401,6 +445,10 @@ SELECT justify_days(interval '6 months 36 days 5 hours 4 minutes 3 seconds') as
@ 7 mons 6 days 5 hours 4 mins 3 secs
(1 row)
+SELECT justify_hours(interval '2147483647 days 24 hrs');
+ERROR: interval out of range
+SELECT justify_days(interval '2147483647 months 30 days');
+ERROR: interval out of range
-- test justify_interval()
SELECT justify_interval(interval '1 month -1 hour') as "1 month -1 hour";
1 month -1 hour
@@ -408,6 +456,38 @@ SELECT justify_interval(interval '1 month -1 hour') as "1 month -1 hour";
@ 29 days 23 hours
(1 row)
+SELECT justify_interval(interval '2147483647 days 24 hrs');
+ justify_interval
+-------------------------------
+ @ 5965232 years 4 mons 8 days
+(1 row)
+
+SELECT justify_interval(interval '-2147483648 days -24 hrs');
+ justify_interval
+-----------------------------------
+ @ 5965232 years 4 mons 9 days ago
+(1 row)
+
+SELECT justify_interval(interval '2147483647 months 30 days');
+ERROR: interval out of range
+SELECT justify_interval(interval '-2147483648 months -30 days');
+ERROR: interval out of range
+SELECT justify_interval(interval '2147483647 months 30 days -24 hrs');
+ justify_interval
+----------------------------------
+ @ 178956970 years 7 mons 29 days
+(1 row)
+
+SELECT justify_interval(interval '-2147483648 months -30 days 24 hrs');
+ justify_interval
+--------------------------------------
+ @ 178956970 years 8 mons 29 days ago
+(1 row)
+
+SELECT justify_interval(interval '2147483647 months -30 days 1440 hrs');
+ERROR: interval out of range
+SELECT justify_interval(interval '-2147483648 months 30 days -1440 hrs');
+ERROR: interval out of range
-- test fractional second input, and detection of duplicate units
SET DATESTYLE = 'ISO';
SET IntervalStyle TO postgres;
@@ -795,6 +875,16 @@ SELECT interval '+1 -1:00:00',
1 day -01:00:00 | -1 days +01:00:00 | 1 year 2 mons -3 days +04:05:06.789 | -1 years -2 mons +3 days -04:05:06.789
(1 row)
+-- cases that trigger sign-matching rules in the sql style
+SELECT interval '-23 hours 45 min 12.34 sec',
+ interval '-1 day 23 hours 45 min 12.34 sec',
+ interval '-1 year 2 months 1 day 23 hours 45 min 12.34 sec',
+ interval '-1 year 2 months 1 day 23 hours 45 min +12.34 sec';
+ interval | interval | interval | interval
+--------------+----------------------+-----------------------------+-----------------------------
+ -22:14:47.66 | -1 days +23:45:12.34 | -10 mons +1 day 23:45:12.34 | -10 mons +1 day 23:45:12.34
+(1 row)
+
-- test output of couple non-standard interval values in the sql style
SET IntervalStyle TO sql_standard;
SELECT interval '1 day -1 hours',
@@ -806,6 +896,21 @@ SELECT interval '1 day -1 hours',
+0-0 +1 -1:00:00 | +0-0 -1 +1:00:00 | +1-2 -3 +4:05:06.789 | -1-2 +3 -4:05:06.789
(1 row)
+-- cases that trigger sign-matching rules in the sql style
+SELECT interval '-23 hours 45 min 12.34 sec',
+ interval '-1 day 23 hours 45 min 12.34 sec',
+ interval '-1 year 2 months 1 day 23 hours 45 min 12.34 sec',
+ interval '-1 year 2 months 1 day 23 hours 45 min +12.34 sec';
+ interval | interval | interval | interval
+--------------+----------------+----------------------+-----------------------
+ -23:45:12.34 | -1 23:45:12.34 | -1-2 -1 -23:45:12.34 | -0-10 +1 +23:45:12.34
+(1 row)
+
+-- edge case for sign-matching rules
+SELECT interval ''; -- error
+ERROR: invalid input syntax for type interval: ""
+LINE 1: SELECT interval '';
+ ^
-- test outputting iso8601 intervals
SET IntervalStyle to iso_8601;
select interval '0' AS "zero",
@@ -857,6 +962,47 @@ select interval 'P0002' AS "year only",
2 years | 2 years 10 mons | 2 years 10 mons 15 days | 2 years 00:00:01 | 2 years 10 mons 00:00:01 | 2 years 10 mons 15 days 00:00:01 | 10:00:00 | 10:30:00
(1 row)
+-- Check handling of fractional fields in ISO8601 format.
+select interval 'P1Y0M3DT4H5M6S';
+ interval
+------------------------
+ 1 year 3 days 04:05:06
+(1 row)
+
+select interval 'P1.0Y0M3DT4H5M6S';
+ interval
+------------------------
+ 1 year 3 days 04:05:06
+(1 row)
+
+select interval 'P1.1Y0M3DT4H5M6S';
+ interval
+------------------------------
+ 1 year 1 mon 3 days 04:05:06
+(1 row)
+
+select interval 'P1.Y0M3DT4H5M6S';
+ interval
+------------------------
+ 1 year 3 days 04:05:06
+(1 row)
+
+select interval 'P.1Y0M3DT4H5M6S';
+ interval
+-----------------------
+ 1 mon 3 days 04:05:06
+(1 row)
+
+select interval 'P10.5e4Y'; -- not per spec, but we've historically taken it
+ interval
+--------------
+ 105000 years
+(1 row)
+
+select interval 'P.Y0M3DT4H5M6S'; -- error
+ERROR: invalid input syntax for type interval: "P.Y0M3DT4H5M6S"
+LINE 1: select interval 'P.Y0M3DT4H5M6S';
+ ^
-- test a couple rounding cases that changed since 8.3 w/ HAVE_INT64_TIMESTAMP.
SET IntervalStyle to postgres_verbose;
select interval '-10 mons -3 days +03:55:06.70';
@@ -877,6 +1023,617 @@ select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds';
@ 0.7 secs | @ 0.7 secs | @ 0.7 secs
(1 row)
+-- test time fields using entire 64 bit microseconds range
+select interval '2562047788.01521550194 hours';
+ interval
+-----------------------------------
+ @ 2562047788 hours 54.775807 secs
+(1 row)
+
+select interval '-2562047788.01521550222 hours';
+ interval
+---------------------------------------
+ @ 2562047788 hours 54.775808 secs ago
+(1 row)
+
+select interval '153722867280.912930117 minutes';
+ interval
+-----------------------------------
+ @ 2562047788 hours 54.775807 secs
+(1 row)
+
+select interval '-153722867280.912930133 minutes';
+ interval
+---------------------------------------
+ @ 2562047788 hours 54.775808 secs ago
+(1 row)
+
+select interval '9223372036854.775807 seconds';
+ interval
+-----------------------------------
+ @ 2562047788 hours 54.775807 secs
+(1 row)
+
+select interval '-9223372036854.775808 seconds';
+ interval
+---------------------------------------
+ @ 2562047788 hours 54.775808 secs ago
+(1 row)
+
+select interval '9223372036854775.807 milliseconds';
+ interval
+-----------------------------------
+ @ 2562047788 hours 54.775807 secs
+(1 row)
+
+select interval '-9223372036854775.808 milliseconds';
+ interval
+---------------------------------------
+ @ 2562047788 hours 54.775808 secs ago
+(1 row)
+
+select interval '9223372036854775807 microseconds';
+ interval
+-----------------------------------
+ @ 2562047788 hours 54.775807 secs
+(1 row)
+
+select interval '-9223372036854775808 microseconds';
+ interval
+---------------------------------------
+ @ 2562047788 hours 54.775808 secs ago
+(1 row)
+
+select interval 'PT2562047788H54.775807S';
+ interval
+-----------------------------------
+ @ 2562047788 hours 54.775807 secs
+(1 row)
+
+select interval 'PT-2562047788H-54.775808S';
+ interval
+---------------------------------------
+ @ 2562047788 hours 54.775808 secs ago
+(1 row)
+
+select interval 'PT2562047788:00:54.775807';
+ interval
+-----------------------------------
+ @ 2562047788 hours 54.775807 secs
+(1 row)
+
+select interval 'PT2562047788.0152155019444';
+ interval
+-----------------------------------
+ @ 2562047788 hours 54.775429 secs
+(1 row)
+
+select interval 'PT-2562047788.0152155022222';
+ interval
+---------------------------------------
+ @ 2562047788 hours 54.775429 secs ago
+(1 row)
+
+-- overflow each date/time field
+select interval '2147483648 years';
+ERROR: interval field value out of range: "2147483648 years"
+LINE 1: select interval '2147483648 years';
+ ^
+select interval '-2147483649 years';
+ERROR: interval field value out of range: "-2147483649 years"
+LINE 1: select interval '-2147483649 years';
+ ^
+select interval '2147483648 months';
+ERROR: interval field value out of range: "2147483648 months"
+LINE 1: select interval '2147483648 months';
+ ^
+select interval '-2147483649 months';
+ERROR: interval field value out of range: "-2147483649 months"
+LINE 1: select interval '-2147483649 months';
+ ^
+select interval '2147483648 days';
+ERROR: interval field value out of range: "2147483648 days"
+LINE 1: select interval '2147483648 days';
+ ^
+select interval '-2147483649 days';
+ERROR: interval field value out of range: "-2147483649 days"
+LINE 1: select interval '-2147483649 days';
+ ^
+select interval '2562047789 hours';
+ERROR: interval field value out of range: "2562047789 hours"
+LINE 1: select interval '2562047789 hours';
+ ^
+select interval '-2562047789 hours';
+ERROR: interval field value out of range: "-2562047789 hours"
+LINE 1: select interval '-2562047789 hours';
+ ^
+select interval '153722867281 minutes';
+ERROR: interval field value out of range: "153722867281 minutes"
+LINE 1: select interval '153722867281 minutes';
+ ^
+select interval '-153722867281 minutes';
+ERROR: interval field value out of range: "-153722867281 minutes"
+LINE 1: select interval '-153722867281 minutes';
+ ^
+select interval '9223372036855 seconds';
+ERROR: interval field value out of range: "9223372036855 seconds"
+LINE 1: select interval '9223372036855 seconds';
+ ^
+select interval '-9223372036855 seconds';
+ERROR: interval field value out of range: "-9223372036855 seconds"
+LINE 1: select interval '-9223372036855 seconds';
+ ^
+select interval '9223372036854777 millisecond';
+ERROR: interval field value out of range: "9223372036854777 millisecond"
+LINE 1: select interval '9223372036854777 millisecond';
+ ^
+select interval '-9223372036854777 millisecond';
+ERROR: interval field value out of range: "-9223372036854777 millisecond"
+LINE 1: select interval '-9223372036854777 millisecond';
+ ^
+select interval '9223372036854775808 microsecond';
+ERROR: interval field value out of range: "9223372036854775808 microsecond"
+LINE 1: select interval '9223372036854775808 microsecond';
+ ^
+select interval '-9223372036854775809 microsecond';
+ERROR: interval field value out of range: "-9223372036854775809 microsecond"
+LINE 1: select interval '-9223372036854775809 microsecond';
+ ^
+select interval 'P2147483648';
+ERROR: interval field value out of range: "P2147483648"
+LINE 1: select interval 'P2147483648';
+ ^
+select interval 'P-2147483649';
+ERROR: interval field value out of range: "P-2147483649"
+LINE 1: select interval 'P-2147483649';
+ ^
+select interval 'P1-2147483647-2147483647';
+ERROR: interval out of range
+LINE 1: select interval 'P1-2147483647-2147483647';
+ ^
+select interval 'PT2562047789';
+ERROR: interval field value out of range: "PT2562047789"
+LINE 1: select interval 'PT2562047789';
+ ^
+select interval 'PT-2562047789';
+ERROR: interval field value out of range: "PT-2562047789"
+LINE 1: select interval 'PT-2562047789';
+ ^
+-- overflow with date/time unit aliases
+select interval '2147483647 weeks';
+ERROR: interval field value out of range: "2147483647 weeks"
+LINE 1: select interval '2147483647 weeks';
+ ^
+select interval '-2147483648 weeks';
+ERROR: interval field value out of range: "-2147483648 weeks"
+LINE 1: select interval '-2147483648 weeks';
+ ^
+select interval '2147483647 decades';
+ERROR: interval field value out of range: "2147483647 decades"
+LINE 1: select interval '2147483647 decades';
+ ^
+select interval '-2147483648 decades';
+ERROR: interval field value out of range: "-2147483648 decades"
+LINE 1: select interval '-2147483648 decades';
+ ^
+select interval '2147483647 centuries';
+ERROR: interval field value out of range: "2147483647 centuries"
+LINE 1: select interval '2147483647 centuries';
+ ^
+select interval '-2147483648 centuries';
+ERROR: interval field value out of range: "-2147483648 centuries"
+LINE 1: select interval '-2147483648 centuries';
+ ^
+select interval '2147483647 millennium';
+ERROR: interval field value out of range: "2147483647 millennium"
+LINE 1: select interval '2147483647 millennium';
+ ^
+select interval '-2147483648 millennium';
+ERROR: interval field value out of range: "-2147483648 millennium"
+LINE 1: select interval '-2147483648 millennium';
+ ^
+select interval '1 week 2147483647 days';
+ERROR: interval field value out of range: "1 week 2147483647 days"
+LINE 1: select interval '1 week 2147483647 days';
+ ^
+select interval '-1 week -2147483648 days';
+ERROR: interval field value out of range: "-1 week -2147483648 days"
+LINE 1: select interval '-1 week -2147483648 days';
+ ^
+select interval '2147483647 days 1 week';
+ERROR: interval field value out of range: "2147483647 days 1 week"
+LINE 1: select interval '2147483647 days 1 week';
+ ^
+select interval '-2147483648 days -1 week';
+ERROR: interval field value out of range: "-2147483648 days -1 week"
+LINE 1: select interval '-2147483648 days -1 week';
+ ^
+select interval 'P1W2147483647D';
+ERROR: interval field value out of range: "P1W2147483647D"
+LINE 1: select interval 'P1W2147483647D';
+ ^
+select interval 'P-1W-2147483648D';
+ERROR: interval field value out of range: "P-1W-2147483648D"
+LINE 1: select interval 'P-1W-2147483648D';
+ ^
+select interval 'P2147483647D1W';
+ERROR: interval field value out of range: "P2147483647D1W"
+LINE 1: select interval 'P2147483647D1W';
+ ^
+select interval 'P-2147483648D-1W';
+ERROR: interval field value out of range: "P-2147483648D-1W"
+LINE 1: select interval 'P-2147483648D-1W';
+ ^
+select interval '1 decade 2147483647 years';
+ERROR: interval field value out of range: "1 decade 2147483647 years"
+LINE 1: select interval '1 decade 2147483647 years';
+ ^
+select interval '1 century 2147483647 years';
+ERROR: interval field value out of range: "1 century 2147483647 years"
+LINE 1: select interval '1 century 2147483647 years';
+ ^
+select interval '1 millennium 2147483647 years';
+ERROR: interval field value out of range: "1 millennium 2147483647 years"
+LINE 1: select interval '1 millennium 2147483647 years';
+ ^
+select interval '-1 decade -2147483648 years';
+ERROR: interval field value out of range: "-1 decade -2147483648 years"
+LINE 1: select interval '-1 decade -2147483648 years';
+ ^
+select interval '-1 century -2147483648 years';
+ERROR: interval field value out of range: "-1 century -2147483648 years"
+LINE 1: select interval '-1 century -2147483648 years';
+ ^
+select interval '-1 millennium -2147483648 years';
+ERROR: interval field value out of range: "-1 millennium -2147483648 years"
+LINE 1: select interval '-1 millennium -2147483648 years';
+ ^
+select interval '2147483647 years 1 decade';
+ERROR: interval field value out of range: "2147483647 years 1 decade"
+LINE 1: select interval '2147483647 years 1 decade';
+ ^
+select interval '2147483647 years 1 century';
+ERROR: interval field value out of range: "2147483647 years 1 century"
+LINE 1: select interval '2147483647 years 1 century';
+ ^
+select interval '2147483647 years 1 millennium';
+ERROR: interval field value out of range: "2147483647 years 1 millennium"
+LINE 1: select interval '2147483647 years 1 millennium';
+ ^
+select interval '-2147483648 years -1 decade';
+ERROR: interval field value out of range: "-2147483648 years -1 decade"
+LINE 1: select interval '-2147483648 years -1 decade';
+ ^
+select interval '-2147483648 years -1 century';
+ERROR: interval field value out of range: "-2147483648 years -1 century"
+LINE 1: select interval '-2147483648 years -1 century';
+ ^
+select interval '-2147483648 years -1 millennium';
+ERROR: interval field value out of range: "-2147483648 years -1 millennium"
+LINE 1: select interval '-2147483648 years -1 millennium';
+ ^
+-- overflowing with fractional fields - postgres format
+select interval '0.1 millennium 2147483647 months';
+ERROR: interval field value out of range: "0.1 millennium 2147483647 months"
+LINE 1: select interval '0.1 millennium 2147483647 months';
+ ^
+select interval '0.1 centuries 2147483647 months';
+ERROR: interval field value out of range: "0.1 centuries 2147483647 months"
+LINE 1: select interval '0.1 centuries 2147483647 months';
+ ^
+select interval '0.1 decades 2147483647 months';
+ERROR: interval field value out of range: "0.1 decades 2147483647 months"
+LINE 1: select interval '0.1 decades 2147483647 months';
+ ^
+select interval '0.1 yrs 2147483647 months';
+ERROR: interval field value out of range: "0.1 yrs 2147483647 months"
+LINE 1: select interval '0.1 yrs 2147483647 months';
+ ^
+select interval '-0.1 millennium -2147483648 months';
+ERROR: interval field value out of range: "-0.1 millennium -2147483648 months"
+LINE 1: select interval '-0.1 millennium -2147483648 months';
+ ^
+select interval '-0.1 centuries -2147483648 months';
+ERROR: interval field value out of range: "-0.1 centuries -2147483648 months"
+LINE 1: select interval '-0.1 centuries -2147483648 months';
+ ^
+select interval '-0.1 decades -2147483648 months';
+ERROR: interval field value out of range: "-0.1 decades -2147483648 months"
+LINE 1: select interval '-0.1 decades -2147483648 months';
+ ^
+select interval '-0.1 yrs -2147483648 months';
+ERROR: interval field value out of range: "-0.1 yrs -2147483648 months"
+LINE 1: select interval '-0.1 yrs -2147483648 months';
+ ^
+select interval '2147483647 months 0.1 millennium';
+ERROR: interval field value out of range: "2147483647 months 0.1 millennium"
+LINE 1: select interval '2147483647 months 0.1 millennium';
+ ^
+select interval '2147483647 months 0.1 centuries';
+ERROR: interval field value out of range: "2147483647 months 0.1 centuries"
+LINE 1: select interval '2147483647 months 0.1 centuries';
+ ^
+select interval '2147483647 months 0.1 decades';
+ERROR: interval field value out of range: "2147483647 months 0.1 decades"
+LINE 1: select interval '2147483647 months 0.1 decades';
+ ^
+select interval '2147483647 months 0.1 yrs';
+ERROR: interval field value out of range: "2147483647 months 0.1 yrs"
+LINE 1: select interval '2147483647 months 0.1 yrs';
+ ^
+select interval '-2147483648 months -0.1 millennium';
+ERROR: interval field value out of range: "-2147483648 months -0.1 millennium"
+LINE 1: select interval '-2147483648 months -0.1 millennium';
+ ^
+select interval '-2147483648 months -0.1 centuries';
+ERROR: interval field value out of range: "-2147483648 months -0.1 centuries"
+LINE 1: select interval '-2147483648 months -0.1 centuries';
+ ^
+select interval '-2147483648 months -0.1 decades';
+ERROR: interval field value out of range: "-2147483648 months -0.1 decades"
+LINE 1: select interval '-2147483648 months -0.1 decades';
+ ^
+select interval '-2147483648 months -0.1 yrs';
+ERROR: interval field value out of range: "-2147483648 months -0.1 yrs"
+LINE 1: select interval '-2147483648 months -0.1 yrs';
+ ^
+select interval '0.1 months 2147483647 days';
+ERROR: interval field value out of range: "0.1 months 2147483647 days"
+LINE 1: select interval '0.1 months 2147483647 days';
+ ^
+select interval '-0.1 months -2147483648 days';
+ERROR: interval field value out of range: "-0.1 months -2147483648 days"
+LINE 1: select interval '-0.1 months -2147483648 days';
+ ^
+select interval '2147483647 days 0.1 months';
+ERROR: interval field value out of range: "2147483647 days 0.1 months"
+LINE 1: select interval '2147483647 days 0.1 months';
+ ^
+select interval '-2147483648 days -0.1 months';
+ERROR: interval field value out of range: "-2147483648 days -0.1 months"
+LINE 1: select interval '-2147483648 days -0.1 months';
+ ^
+select interval '0.5 weeks 2147483647 days';
+ERROR: interval field value out of range: "0.5 weeks 2147483647 days"
+LINE 1: select interval '0.5 weeks 2147483647 days';
+ ^
+select interval '-0.5 weeks -2147483648 days';
+ERROR: interval field value out of range: "-0.5 weeks -2147483648 days"
+LINE 1: select interval '-0.5 weeks -2147483648 days';
+ ^
+select interval '2147483647 days 0.5 weeks';
+ERROR: interval field value out of range: "2147483647 days 0.5 weeks"
+LINE 1: select interval '2147483647 days 0.5 weeks';
+ ^
+select interval '-2147483648 days -0.5 weeks';
+ERROR: interval field value out of range: "-2147483648 days -0.5 weeks"
+LINE 1: select interval '-2147483648 days -0.5 weeks';
+ ^
+select interval '0.01 months 9223372036854775807 microseconds';
+ERROR: interval field value out of range: "0.01 months 9223372036854775807 microseconds"
+LINE 1: select interval '0.01 months 9223372036854775807 microsecond...
+ ^
+select interval '-0.01 months -9223372036854775808 microseconds';
+ERROR: interval field value out of range: "-0.01 months -9223372036854775808 microseconds"
+LINE 1: select interval '-0.01 months -9223372036854775808 microseco...
+ ^
+select interval '9223372036854775807 microseconds 0.01 months';
+ERROR: interval field value out of range: "9223372036854775807 microseconds 0.01 months"
+LINE 1: select interval '9223372036854775807 microseconds 0.01 month...
+ ^
+select interval '-9223372036854775808 microseconds -0.01 months';
+ERROR: interval field value out of range: "-9223372036854775808 microseconds -0.01 months"
+LINE 1: select interval '-9223372036854775808 microseconds -0.01 mon...
+ ^
+select interval '0.1 weeks 9223372036854775807 microseconds';
+ERROR: interval field value out of range: "0.1 weeks 9223372036854775807 microseconds"
+LINE 1: select interval '0.1 weeks 9223372036854775807 microseconds'...
+ ^
+select interval '-0.1 weeks -9223372036854775808 microseconds';
+ERROR: interval field value out of range: "-0.1 weeks -9223372036854775808 microseconds"
+LINE 1: select interval '-0.1 weeks -9223372036854775808 microsecond...
+ ^
+select interval '9223372036854775807 microseconds 0.1 weeks';
+ERROR: interval field value out of range: "9223372036854775807 microseconds 0.1 weeks"
+LINE 1: select interval '9223372036854775807 microseconds 0.1 weeks'...
+ ^
+select interval '-9223372036854775808 microseconds -0.1 weeks';
+ERROR: interval field value out of range: "-9223372036854775808 microseconds -0.1 weeks"
+LINE 1: select interval '-9223372036854775808 microseconds -0.1 week...
+ ^
+select interval '0.1 days 9223372036854775807 microseconds';
+ERROR: interval field value out of range: "0.1 days 9223372036854775807 microseconds"
+LINE 1: select interval '0.1 days 9223372036854775807 microseconds';
+ ^
+select interval '-0.1 days -9223372036854775808 microseconds';
+ERROR: interval field value out of range: "-0.1 days -9223372036854775808 microseconds"
+LINE 1: select interval '-0.1 days -9223372036854775808 microseconds...
+ ^
+select interval '9223372036854775807 microseconds 0.1 days';
+ERROR: interval field value out of range: "9223372036854775807 microseconds 0.1 days"
+LINE 1: select interval '9223372036854775807 microseconds 0.1 days';
+ ^
+select interval '-9223372036854775808 microseconds -0.1 days';
+ERROR: interval field value out of range: "-9223372036854775808 microseconds -0.1 days"
+LINE 1: select interval '-9223372036854775808 microseconds -0.1 days...
+ ^
+-- overflowing with fractional fields - ISO8601 format
+select interval 'P0.1Y2147483647M';
+ERROR: interval field value out of range: "P0.1Y2147483647M"
+LINE 1: select interval 'P0.1Y2147483647M';
+ ^
+select interval 'P-0.1Y-2147483648M';
+ERROR: interval field value out of range: "P-0.1Y-2147483648M"
+LINE 1: select interval 'P-0.1Y-2147483648M';
+ ^
+select interval 'P2147483647M0.1Y';
+ERROR: interval field value out of range: "P2147483647M0.1Y"
+LINE 1: select interval 'P2147483647M0.1Y';
+ ^
+select interval 'P-2147483648M-0.1Y';
+ERROR: interval field value out of range: "P-2147483648M-0.1Y"
+LINE 1: select interval 'P-2147483648M-0.1Y';
+ ^
+select interval 'P0.1M2147483647D';
+ERROR: interval field value out of range: "P0.1M2147483647D"
+LINE 1: select interval 'P0.1M2147483647D';
+ ^
+select interval 'P-0.1M-2147483648D';
+ERROR: interval field value out of range: "P-0.1M-2147483648D"
+LINE 1: select interval 'P-0.1M-2147483648D';
+ ^
+select interval 'P2147483647D0.1M';
+ERROR: interval field value out of range: "P2147483647D0.1M"
+LINE 1: select interval 'P2147483647D0.1M';
+ ^
+select interval 'P-2147483648D-0.1M';
+ERROR: interval field value out of range: "P-2147483648D-0.1M"
+LINE 1: select interval 'P-2147483648D-0.1M';
+ ^
+select interval 'P0.5W2147483647D';
+ERROR: interval field value out of range: "P0.5W2147483647D"
+LINE 1: select interval 'P0.5W2147483647D';
+ ^
+select interval 'P-0.5W-2147483648D';
+ERROR: interval field value out of range: "P-0.5W-2147483648D"
+LINE 1: select interval 'P-0.5W-2147483648D';
+ ^
+select interval 'P2147483647D0.5W';
+ERROR: interval field value out of range: "P2147483647D0.5W"
+LINE 1: select interval 'P2147483647D0.5W';
+ ^
+select interval 'P-2147483648D-0.5W';
+ERROR: interval field value out of range: "P-2147483648D-0.5W"
+LINE 1: select interval 'P-2147483648D-0.5W';
+ ^
+select interval 'P0.01MT2562047788H54.775807S';
+ERROR: interval field value out of range: "P0.01MT2562047788H54.775807S"
+LINE 1: select interval 'P0.01MT2562047788H54.775807S';
+ ^
+select interval 'P-0.01MT-2562047788H-54.775808S';
+ERROR: interval field value out of range: "P-0.01MT-2562047788H-54.775808S"
+LINE 1: select interval 'P-0.01MT-2562047788H-54.775808S';
+ ^
+select interval 'P0.1DT2562047788H54.775807S';
+ERROR: interval field value out of range: "P0.1DT2562047788H54.775807S"
+LINE 1: select interval 'P0.1DT2562047788H54.775807S';
+ ^
+select interval 'P-0.1DT-2562047788H-54.775808S';
+ERROR: interval field value out of range: "P-0.1DT-2562047788H-54.775808S"
+LINE 1: select interval 'P-0.1DT-2562047788H-54.775808S';
+ ^
+select interval 'PT2562047788.1H54.775807S';
+ERROR: interval field value out of range: "PT2562047788.1H54.775807S"
+LINE 1: select interval 'PT2562047788.1H54.775807S';
+ ^
+select interval 'PT-2562047788.1H-54.775808S';
+ERROR: interval field value out of range: "PT-2562047788.1H-54.775808S"
+LINE 1: select interval 'PT-2562047788.1H-54.775808S';
+ ^
+select interval 'PT2562047788H0.1M54.775807S';
+ERROR: interval field value out of range: "PT2562047788H0.1M54.775807S"
+LINE 1: select interval 'PT2562047788H0.1M54.775807S';
+ ^
+select interval 'PT-2562047788H-0.1M-54.775808S';
+ERROR: interval field value out of range: "PT-2562047788H-0.1M-54.775808S"
+LINE 1: select interval 'PT-2562047788H-0.1M-54.775808S';
+ ^
+-- overflowing with fractional fields - ISO8601 alternative format
+select interval 'P0.1-2147483647-00';
+ERROR: interval field value out of range: "P0.1-2147483647-00"
+LINE 1: select interval 'P0.1-2147483647-00';
+ ^
+select interval 'P00-0.1-2147483647';
+ERROR: interval field value out of range: "P00-0.1-2147483647"
+LINE 1: select interval 'P00-0.1-2147483647';
+ ^
+select interval 'P00-0.01-00T2562047788:00:54.775807';
+ERROR: interval field value out of range: "P00-0.01-00T2562047788:00:54.775807"
+LINE 1: select interval 'P00-0.01-00T2562047788:00:54.775807';
+ ^
+select interval 'P00-00-0.1T2562047788:00:54.775807';
+ERROR: interval field value out of range: "P00-00-0.1T2562047788:00:54.775807"
+LINE 1: select interval 'P00-00-0.1T2562047788:00:54.775807';
+ ^
+select interval 'PT2562047788.1:00:54.775807';
+ERROR: interval field value out of range: "PT2562047788.1:00:54.775807"
+LINE 1: select interval 'PT2562047788.1:00:54.775807';
+ ^
+select interval 'PT2562047788:01.:54.775807';
+ERROR: interval field value out of range: "PT2562047788:01.:54.775807"
+LINE 1: select interval 'PT2562047788:01.:54.775807';
+ ^
+-- overflowing with fractional fields - SQL standard format
+select interval '0.1 2562047788:0:54.775807';
+ERROR: interval field value out of range: "0.1 2562047788:0:54.775807"
+LINE 1: select interval '0.1 2562047788:0:54.775807';
+ ^
+select interval '0.1 2562047788:0:54.775808 ago';
+ERROR: interval field value out of range: "0.1 2562047788:0:54.775808 ago"
+LINE 1: select interval '0.1 2562047788:0:54.775808 ago';
+ ^
+select interval '2562047788.1:0:54.775807';
+ERROR: interval field value out of range: "2562047788.1:0:54.775807"
+LINE 1: select interval '2562047788.1:0:54.775807';
+ ^
+select interval '2562047788.1:0:54.775808 ago';
+ERROR: interval field value out of range: "2562047788.1:0:54.775808 ago"
+LINE 1: select interval '2562047788.1:0:54.775808 ago';
+ ^
+select interval '2562047788:0.1:54.775807';
+ERROR: invalid input syntax for type interval: "2562047788:0.1:54.775807"
+LINE 1: select interval '2562047788:0.1:54.775807';
+ ^
+select interval '2562047788:0.1:54.775808 ago';
+ERROR: invalid input syntax for type interval: "2562047788:0.1:54.775808 ago"
+LINE 1: select interval '2562047788:0.1:54.775808 ago';
+ ^
+-- overflowing using AGO with INT_MIN
+select interval '-2147483648 months ago';
+ERROR: interval field value out of range: "-2147483648 months ago"
+LINE 1: select interval '-2147483648 months ago';
+ ^
+select interval '-2147483648 days ago';
+ERROR: interval field value out of range: "-2147483648 days ago"
+LINE 1: select interval '-2147483648 days ago';
+ ^
+select interval '-9223372036854775808 microseconds ago';
+ERROR: interval field value out of range: "-9223372036854775808 microseconds ago"
+LINE 1: select interval '-9223372036854775808 microseconds ago';
+ ^
+select interval '-2147483648 months -2147483648 days -9223372036854775808 microseconds ago';
+ERROR: interval field value out of range: "-2147483648 months -2147483648 days -9223372036854775808 microseconds ago"
+LINE 1: select interval '-2147483648 months -2147483648 days -922337...
+ ^
+-- test that INT_MIN number is formatted properly
+SET IntervalStyle to postgres;
+select interval '-2147483648 months -2147483648 days -9223372036854775808 us';
+ interval
+--------------------------------------------------------------------
+ -178956970 years -8 mons -2147483648 days -2562047788:00:54.775808
+(1 row)
+
+SET IntervalStyle to sql_standard;
+select interval '-2147483648 months -2147483648 days -9223372036854775808 us';
+ interval
+---------------------------------------------------
+ -178956970-8 -2147483648 -2562047788:00:54.775808
+(1 row)
+
+SET IntervalStyle to iso_8601;
+select interval '-2147483648 months -2147483648 days -9223372036854775808 us';
+ interval
+-----------------------------------------------------
+ P-178956970Y-8M-2147483648DT-2562047788H-54.775808S
+(1 row)
+
+SET IntervalStyle to postgres_verbose;
+select interval '-2147483648 months -2147483648 days -9223372036854775808 us';
+ interval
+------------------------------------------------------------------------------
+ @ 178956970 years 8 mons 2147483648 days 2562047788 hours 54.775808 secs ago
+(1 row)
+
-- check that '30 days' equals '1 month' according to the hash function
select '30 days'::interval = '1 month'::interval as t;
t
@@ -968,9 +1725,9 @@ SELECT f1,
(10 rows)
SELECT EXTRACT(FORTNIGHT FROM INTERVAL '2 days'); -- error
-ERROR: interval units "fortnight" not recognized
+ERROR: unit "fortnight" not recognized for type interval
SELECT EXTRACT(TIMEZONE FROM INTERVAL '2 days'); -- error
-ERROR: interval units "timezone" not supported
+ERROR: unit "timezone" not supported for type interval
SELECT EXTRACT(DECADE FROM INTERVAL '100 y');
extract
---------
diff --git a/src/test/regress/expected/join_hash_optimizer.out b/src/test/regress/expected/join_hash_optimizer.out
index 053d0ef4898..72d3d68ee80 100644
--- a/src/test/regress/expected/join_hash_optimizer.out
+++ b/src/test/regress/expected/join_hash_optimizer.out
@@ -99,6 +99,7 @@ ANALYZE wide;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from simple r join simple s using (id);
QUERY PLAN
@@ -139,6 +140,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join simple s using (id);
@@ -180,6 +182,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join simple s using (id);
@@ -224,6 +227,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from simple r join simple s using (id);
@@ -265,6 +269,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = off;
explain (costs off)
@@ -307,6 +312,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = on;
explain (costs off)
@@ -344,6 +350,13 @@ $$);
t | f
(1 row)
+-- parallel full multi-batch hash join
+select count(*) from simple r full outer join simple s using (id);
+ count
+-------
+ 60000
+(1 row)
+
rollback to settings;
-- The "bad" case: during execution we need to increase number of
-- batches; in this case we plan for 1 batch, and increase at least a
@@ -353,6 +366,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
@@ -391,6 +405,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = off;
explain (costs off)
@@ -430,6 +445,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = on;
explain (costs off)
@@ -474,6 +490,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
@@ -517,6 +534,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = off;
explain (costs off)
@@ -561,6 +579,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = on;
explain (costs off)
@@ -606,6 +625,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
set local parallel_leader_participation = off;
select * from hash_join_batches(
$$
@@ -638,12 +658,13 @@ set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1;
- QUERY PLAN
+ QUERY PLAN
------------------------------------------------------------------------------------------------------------
Finalize Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
@@ -697,11 +718,12 @@ set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1;
- QUERY PLAN
+ QUERY PLAN
------------------------------------------------------------------------------------------------------------
Finalize Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
@@ -755,12 +777,13 @@ set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1;
- QUERY PLAN
+ QUERY PLAN
------------------------------------------------------------------------------------------------------------
Finalize Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
@@ -814,11 +837,12 @@ set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1;
- QUERY PLAN
+ QUERY PLAN
------------------------------------------------------------------------------------------------------------
Finalize Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
@@ -891,7 +915,36 @@ select count(*) from simple r full outer join simple s using (id);
(1 row)
rollback to settings;
--- parallelism not possible with parallel-oblivious outer hash join
+-- parallelism not possible with parallel-oblivious full hash join
+savepoint settings;
+set enable_parallel_hash = off;
+set local max_parallel_workers_per_gather = 2;
+explain (costs off)
+ select count(*) from simple r full outer join simple s using (id);
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Finalize Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Partial Aggregate
+ -> Hash Full Join
+ Hash Cond: (r.id = s.id)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: r.id
+ -> Seq Scan on simple r
+ -> Hash
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: s.id
+ -> Seq Scan on simple s
+(13 rows)
+
+select count(*) from simple r full outer join simple s using (id);
+ count
+-------
+ 60000
+(1 row)
+
+rollback to settings;
+-- parallelism is possible with parallel-aware full hash join
savepoint settings;
set local max_parallel_workers_per_gather = 2;
explain (costs off)
@@ -920,7 +973,7 @@ select count(*) from simple r full outer join simple s using (id);
(1 row)
rollback to settings;
--- An full outer join where every record is not matched.
+-- A full outer join where every record is not matched.
-- non-parallel
savepoint settings;
set local max_parallel_workers_per_gather = 0;
@@ -950,7 +1003,36 @@ select count(*) from simple r full outer join simple s on (r.id = 0 - s.id);
(1 row)
rollback to settings;
--- parallelism not possible with parallel-oblivious outer hash join
+-- parallelism not possible with parallel-oblivious full hash join
+savepoint settings;
+set enable_parallel_hash = off;
+set local max_parallel_workers_per_gather = 2;
+explain (costs off)
+ select count(*) from simple r full outer join simple s on (r.id = 0 - s.id);
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Finalize Aggregate
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Partial Aggregate
+ -> Hash Full Join
+ Hash Cond: (r.id = (0 - s.id))
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: r.id
+ -> Seq Scan on simple r
+ -> Hash
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: (0 - s.id)
+ -> Seq Scan on simple s
+(13 rows)
+
+select count(*) from simple r full outer join simple s on (r.id = 0 - s.id);
+ count
+--------
+ 120000
+(1 row)
+
+rollback to settings;
+-- parallelism is possible with parallel-aware full hash join
savepoint settings;
set local max_parallel_workers_per_gather = 2;
explain (costs off)
@@ -994,6 +1076,7 @@ savepoint settings;
set max_parallel_workers_per_gather = 2;
set enable_parallel_hash = on;
set work_mem = '128kB';
+set hash_mem_multiplier = 1.0;
insert into wide select generate_series(3, 100) as id, rpad('', 320000, 'x') as t;
explain (costs off)
select length(max(s.t))
@@ -1064,6 +1147,44 @@ explain (costs off) select * from join_hash_t_small, join_hash_t_big where a = b
(7 rows)
rollback to settings;
+-- Hash join reuses the HOT status bit to indicate match status. This can only
+-- be guaranteed to produce correct results if all the hash join tuple match
+-- bits are reset before reuse. This is done upon loading them into the
+-- hashtable.
+SAVEPOINT settings;
+SET enable_parallel_hash = on;
+SET min_parallel_table_scan_size = 0;
+SET parallel_setup_cost = 0;
+SET parallel_tuple_cost = 0;
+CREATE TABLE hjtest_matchbits_t1(id int);
+CREATE TABLE hjtest_matchbits_t2(id int);
+INSERT INTO hjtest_matchbits_t1 VALUES (1);
+INSERT INTO hjtest_matchbits_t2 VALUES (2);
+-- Update should create a HOT tuple. If this status bit isn't cleared, we won't
+-- correctly emit the NULL-extended unmatching tuple in full hash join.
+UPDATE hjtest_matchbits_t2 set id = 2;
+SELECT * FROM hjtest_matchbits_t1 t1 FULL JOIN hjtest_matchbits_t2 t2 ON t1.id = t2.id
+ ORDER BY t1.id;
+ id | id
+----+----
+ | 2
+ 1 |
+(2 rows)
+
+-- Test serial full hash join.
+-- Resetting parallel_setup_cost should force a serial plan.
+-- Just to be safe, however, set enable_parallel_hash to off, as parallel full
+-- hash joins are only supported with shared hashtables.
+RESET parallel_setup_cost;
+SET enable_parallel_hash = off;
+SELECT * FROM hjtest_matchbits_t1 t1 FULL JOIN hjtest_matchbits_t2 t2 ON t1.id = t2.id;
+ id | id
+----+----
+ | 2
+ 1 |
+(2 rows)
+
+ROLLBACK TO settings;
rollback;
-- Verify that hash key expressions reference the correct
-- nodes. Hashjoin's hashkeys need to reference its outer plan, Hash's
@@ -1249,3 +1370,37 @@ WHERE
(1 row)
ROLLBACK;
+-- Verify that we behave sanely when the inner hash keys contain parameters
+-- (that is, outer or lateral references). This situation has to defeat
+-- re-use of the inner hash table across rescans.
+begin;
+set local enable_hashjoin = on;
+explain (costs off)
+select i8.q2, ss.* from
+int8_tbl i8,
+lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4
+ on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss;
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int8_tbl i8
+ -> Materialize
+ -> Sort
+ Sort Key: t1.fivethous, i4.f1
+ -> Hash Join
+ Hash Cond: (t1.fivethous = (i4.f1 + i8.q2))
+ -> Seq Scan on tenk1 t1
+ -> Hash
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl i4
+(14 rows)
+
+select i8.q2, ss.* from
+int8_tbl i8,
+lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4
+ on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss;
+ERROR: illegal rescan of motion node: invalid plan (nodeMotion.c:1367)
+HINT: Likely caused by bad NL-join, try setting enable_nestloop to off
+rollback;
diff --git a/src/test/regress/expected/join_optimizer.out b/src/test/regress/expected/join_optimizer.out
index 70e0e7f3e8f..fd987628b10 100644
--- a/src/test/regress/expected/join_optimizer.out
+++ b/src/test/regress/expected/join_optimizer.out
@@ -1643,7 +1643,7 @@ SELECT * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t = 'one'; -- er
ERROR: invalid reference to FROM-clause entry for table "j1_tbl"
LINE 1: ... * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t =...
^
-HINT: There is an entry for table "j1_tbl", but it cannot be referenced from this part of the query.
+DETAIL: There is an entry for table "j1_tbl", but it cannot be referenced from this part of the query.
SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.i = 1; -- ok
i | j | t | k
---+---+-----+----
@@ -1914,28 +1914,27 @@ select * from int4_tbl i4, tenk1 a
where exists(select * from tenk1 b
where a.twothousand = b.twothousand and a.fivethous <> b.fivethous)
and i4.f1 = a.tenthous;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+-------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
- -> HashAggregate
- Group Key: i4.f1, i4.ctid, i4.gp_segment_id, a.unique1, a.unique2, a.two, a.four, a.ten, a.twenty, a.hundred, a.thousand, a.twothousand, a.fivethous, a.tenthous, a.odd, a.even, a.stringu1, a.stringu2, a.string4, a.ctid, a.gp_segment_id
+ -> Hash Semi Join
+ Hash Cond: (a.twothousand = b.twothousand)
+ Join Filter: (a.fivethous <> b.fivethous)
-> Redistribute Motion 3:3 (slice2; segments: 3)
- Hash Key: i4.ctid, i4.gp_segment_id, a.ctid, a.gp_segment_id
- -> Hash Join
- Hash Cond: (b.twothousand = a.twothousand)
- Join Filter: (a.fivethous <> b.fivethous)
+ Hash Key: a.twothousand
+ -> Nested Loop
+ Join Filter: true
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl i4
+ -> Index Scan using tenk1_thous_tenthous on tenk1 a
+ Index Cond: (tenthous = i4.f1)
+ Filter: (NOT (twothousand IS NULL))
+ -> Hash
+ -> Redistribute Motion 3:3 (slice4; segments: 3)
+ Hash Key: b.twothousand
-> Seq Scan on tenk1 b
- -> Hash
- -> Broadcast Motion 3:3 (slice3; segments: 3)
- -> Nested Loop
- Join Filter: true
- -> Broadcast Motion 3:3 (slice4; segments: 3)
- -> Seq Scan on int4_tbl i4
- -> Index Scan using tenk1_thous_tenthous on tenk1 a
- Index Cond: (tenthous = i4.f1)
- Filter: (NOT (twothousand IS NULL))
Optimizer: GPORCA
-(19 rows)
+(18 rows)
--
-- More complicated constructs
@@ -2287,17 +2286,16 @@ rollback;
--
explain (costs off)
select aa, bb, unique1, unique1
- from tenk1 right join b on aa = unique1
+ from tenk1 right join b_star on aa = unique1
where bb < bb and bb is null;
QUERY PLAN
-------------------------------------
Result
One-Time Filter: false
- Optimizer: Postgres query optimizer
(3 rows)
select aa, bb, unique1, unique1
- from tenk1 right join b on aa = unique1
+ from tenk1 right join b_star on aa = unique1
where bb < bb and bb is null;
aa | bb | unique1 | unique1
----+----+---------+---------
@@ -2348,8 +2346,8 @@ order by 1, 2;
(5 rows)
--
--- regression test: check a case where join_clause_is_movable_into() gives
--- an imprecise result, causing an assertion failure
+-- regression test: check a case where join_clause_is_movable_into()
+-- used to give an imprecise result, causing an assertion failure
--
select count(*)
from
@@ -2413,6 +2411,407 @@ where b.f1 = t.thousand and a.f1 = b.f1 and (a.f1+b.f1+999) = t.tenthous;
reset enable_hashjoin;
reset enable_nestloop;
+--
+-- checks for correct handling of quals in multiway outer joins
+--
+explain (costs off)
+select t1.f1
+from int4_tbl t1, int4_tbl t2
+ left join int4_tbl t3 on t3.f1 > 0
+ left join int4_tbl t4 on t3.f1 > 1
+where t4.f1 is null;
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ Join Filter: true
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on int4_tbl t1
+ -> Materialize
+ -> Result
+ Filter: (t4.f1 IS NULL)
+ -> Nested Loop Left Join
+ Join Filter: (t3.f1 > 1)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int4_tbl t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl t3
+ Filter: (f1 > 0)
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int4_tbl t4
+ Optimizer: GPORCA
+(21 rows)
+
+select t1.f1
+from int4_tbl t1, int4_tbl t2
+ left join int4_tbl t3 on t3.f1 > 0
+ left join int4_tbl t4 on t3.f1 > 1
+where t4.f1 is null;
+ f1
+----
+(0 rows)
+
+explain (costs off)
+select *
+from int4_tbl t1 left join int4_tbl t2 on true
+ left join int4_tbl t3 on t2.f1 > 0
+ left join int4_tbl t4 on t3.f1 > 0;
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: (t3.f1 > 0)
+ -> Nested Loop Left Join
+ Join Filter: (t2.f1 > 0)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int4_tbl t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on int4_tbl t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl t3
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int4_tbl t4
+ Optimizer: GPORCA
+(18 rows)
+
+explain (costs off)
+select * from onek t1
+ left join onek t2 on t1.unique1 = t2.unique1
+ left join onek t3 on t2.unique1 != t3.unique1
+ left join onek t4 on t3.unique1 = t4.unique1;
+ QUERY PLAN
+--------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Left Join
+ Hash Cond: (t3.unique1 = t4.unique1)
+ -> Nested Loop Left Join
+ Join Filter: (t2.unique1 <> t3.unique1)
+ -> Hash Left Join
+ Hash Cond: (t1.unique1 = t2.unique1)
+ -> Seq Scan on onek t1
+ -> Hash
+ -> Seq Scan on onek t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on onek t3
+ -> Hash
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on onek t4
+ Optimizer: GPORCA
+(17 rows)
+
+explain (costs off)
+select * from int4_tbl t1
+ left join (select now() from int4_tbl t2
+ left join int4_tbl t3 on t2.f1 = t3.f1
+ left join int4_tbl t4 on t3.f1 = t4.f1) s on true
+ inner join int4_tbl t5 on true;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Nested Loop
+ Join Filter: true
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl t1
+ -> Seq Scan on int4_tbl t5
+ -> Materialize
+ -> Result
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Hash Left Join
+ Hash Cond: (t3.f1 = t4.f1)
+ -> Hash Left Join
+ Hash Cond: (t2.f1 = t3.f1)
+ -> Seq Scan on int4_tbl t2
+ -> Hash
+ -> Seq Scan on int4_tbl t3
+ -> Hash
+ -> Seq Scan on int4_tbl t4
+ Optimizer: GPORCA
+(21 rows)
+
+explain (costs off)
+select * from int4_tbl t1
+ left join int4_tbl t2 on true
+ left join int4_tbl t3 on true
+ left join int4_tbl t4 on t2.f1 = t3.f1;
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: (t2.f1 = t3.f1)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int4_tbl t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on int4_tbl t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl t3
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int4_tbl t4
+ Optimizer: GPORCA
+(18 rows)
+
+explain (costs off)
+select * from int4_tbl t1
+ left join int4_tbl t2 on true
+ left join int4_tbl t3 on t2.f1 = t3.f1
+ left join int4_tbl t4 on t3.f1 != t4.f1;
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: (t3.f1 <> t4.f1)
+ -> Hash Left Join
+ Hash Cond: (t2.f1 = t3.f1)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int4_tbl t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl t2
+ -> Hash
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on int4_tbl t3
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int4_tbl t4
+ Optimizer: GPORCA
+(18 rows)
+
+explain (costs off)
+select * from int4_tbl t1
+ left join (int4_tbl t2 left join int4_tbl t3 on t2.f1 > 0) on t2.f1 > 1
+ left join int4_tbl t4 on t2.f1 > 2 and t3.f1 > 3
+where t1.f1 = coalesce(t2.f1, 1);
+ QUERY PLAN
+--------------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: ((t2.f1 > 2) AND (t3.f1 > 3))
+ -> Result
+ Filter: (t1.f1 = COALESCE(t2.f1, 1))
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int4_tbl t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: (t2.f1 > 0)
+ -> Seq Scan on int4_tbl t2
+ Filter: (f1 > 1)
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on int4_tbl t3
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int4_tbl t4
+ Optimizer: GPORCA
+(21 rows)
+
+explain (costs off)
+select * from int4_tbl t1
+ left join ((select t2.f1 from int4_tbl t2
+ left join int4_tbl t3 on t2.f1 > 0
+ where t3.f1 is null) s
+ left join tenk1 t4 on s.f1 > 1)
+ on s.f1 = t1.f1;
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Right Join
+ Hash Cond: (t2.f1 = t1.f1)
+ -> Nested Loop Left Join
+ Join Filter: (t2.f1 > 1)
+ -> Result
+ Filter: (t3.f1 IS NULL)
+ -> Nested Loop Left Join
+ Join Filter: (t2.f1 > 0)
+ -> Seq Scan on int4_tbl t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl t3
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on tenk1 t4
+ -> Hash
+ -> Seq Scan on int4_tbl t1
+ Optimizer: GPORCA
+(19 rows)
+
+explain (costs off)
+select * from int4_tbl t1
+ left join ((select t2.f1 from int4_tbl t2
+ left join int4_tbl t3 on t2.f1 > 0
+ where t2.f1 <> coalesce(t3.f1, -1)) s
+ left join tenk1 t4 on s.f1 > 1)
+ on s.f1 = t1.f1;
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Right Join
+ Hash Cond: (t2.f1 = t1.f1)
+ -> Nested Loop Left Join
+ Join Filter: (t2.f1 > 1)
+ -> Result
+ Filter: (t2.f1 <> COALESCE(t3.f1, '-1'::integer))
+ -> Nested Loop Left Join
+ Join Filter: (t2.f1 > 0)
+ -> Seq Scan on int4_tbl t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl t3
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on tenk1 t4
+ -> Hash
+ -> Seq Scan on int4_tbl t1
+ Optimizer: GPORCA
+(19 rows)
+
+explain (costs off)
+select * from onek t1
+ left join onek t2 on t1.unique1 = t2.unique1
+ left join onek t3 on t2.unique1 = t3.unique1
+ left join onek t4 on t3.unique1 = t4.unique1 and t2.unique2 = t4.unique2;
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Left Join
+ Hash Cond: ((t3.unique1 = t4.unique1) AND (t2.unique2 = t4.unique2))
+ -> Hash Left Join
+ Hash Cond: (t2.unique1 = t3.unique1)
+ -> Hash Left Join
+ Hash Cond: (t1.unique1 = t2.unique1)
+ -> Seq Scan on onek t1
+ -> Hash
+ -> Seq Scan on onek t2
+ -> Hash
+ -> Seq Scan on onek t3
+ -> Hash
+ -> Seq Scan on onek t4
+ Optimizer: GPORCA
+(15 rows)
+
+explain (costs off)
+select * from int8_tbl t1 left join
+ (int8_tbl t2 left join int8_tbl t3 full join int8_tbl t4 on false on false)
+ left join int8_tbl t5 on t2.q1 = t5.q1
+on t2.q2 = 123;
+ QUERY PLAN
+--------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int8_tbl t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Hash Left Join
+ Hash Cond: (t2.q1 = t5.q1)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int8_tbl t2
+ Filter: (q2 = 123)
+ -> Result
+ One-Time Filter: false
+ -> Hash
+ -> Seq Scan on int8_tbl t5
+ Optimizer: GPORCA
+(17 rows)
+
+explain (costs off)
+select * from int8_tbl t1
+ left join int8_tbl t2 on true
+ left join lateral
+ (select * from int8_tbl t3 where t3.q1 = t2.q1 offset 0) s
+ on t2.q1 = 1;
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Nested Loop Left Join
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on int8_tbl t1
+ -> Materialize
+ -> Nested Loop Left Join
+ Join Filter: (t2.q1 = 1)
+ -> Gather Motion 3:1 (slice2; segments: 3)
+ -> Seq Scan on int8_tbl t2
+ -> Materialize
+ -> Result
+ Filter: (t3.q1 = t2.q1)
+ -> Materialize
+ -> Gather Motion 3:1 (slice3; segments: 3)
+ -> Seq Scan on int8_tbl t3
+ Optimizer: GPORCA
+(15 rows)
+
+explain (costs off)
+select * from int8_tbl t1
+ left join int8_tbl t2 on true
+ left join lateral
+ (select * from generate_series(t2.q1, 100)) s
+ on t2.q1 = 1;
+ QUERY PLAN
+---------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ -> Seq Scan on int8_tbl t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: (t2.q1 = 1)
+ -> Seq Scan on int8_tbl t2
+ -> Function Scan on generate_series
+ Optimizer: GPORCA
+(10 rows)
+
+explain (costs off)
+select * from int8_tbl t1
+ left join int8_tbl t2 on true
+ left join lateral
+ (select t2.q1 from int8_tbl t3) s
+ on t2.q1 = 1;
+ERROR: could not devise a query plan for the given query
+explain (costs off)
+select * from onek t1
+ left join onek t2 on true
+ left join lateral
+ (select * from onek t3 where t3.two = t2.two offset 0) s
+ on t2.unique1 = 1;
+ QUERY PLAN
+-----------------------------------------------------------------------------------
+ Nested Loop Left Join
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on onek t1
+ -> Materialize
+ -> Nested Loop Left Join
+ Join Filter: (t2.unique1 = 1)
+ -> Gather Motion 3:1 (slice2; segments: 3)
+ -> Seq Scan on onek t2
+ -> Materialize
+ -> Memoize
+ Cache Key: t2.two
+ Cache Mode: binary
+ -> Result
+ Filter: (t3.two = t2.two)
+ -> Materialize
+ -> Gather Motion 3:1 (slice3; segments: 3)
+ -> Seq Scan on onek t3
+ Optimizer: GPORCA
+(18 rows)
+
--
-- check a case where we formerly got confused by conflicting sort orders
-- in redundant merge join path keys
@@ -2504,6 +2903,42 @@ select count(*) from
reset enable_mergejoin;
reset enable_hashjoin;
+set enable_hashjoin = 0;
+set enable_nestloop = 0;
+set enable_hashagg = 0;
+--
+-- Check that we use the pathkeys from a prefix of the group by / order by
+-- clause for the join pathkeys when that prefix covers all join quals. We
+-- expect this to lead to an incremental sort for the group by / order by.
+--
+explain (costs off)
+select x.thousand, x.twothousand, count(*)
+from tenk1 x inner join tenk1 y on x.thousand = y.thousand
+group by x.thousand, x.twothousand
+order by x.thousand desc, x.twothousand;
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: x.thousand, x.twothousand
+ -> Sort
+ Sort Key: x.thousand DESC, x.twothousand
+ -> HashAggregate
+ Group Key: x.thousand, x.twothousand
+ -> Hash Join
+ Hash Cond: (x.thousand = y.thousand)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: x.thousand
+ -> Seq Scan on tenk1 x
+ -> Hash
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: y.thousand
+ -> Seq Scan on tenk1 y
+ Optimizer: GPORCA
+(16 rows)
+
+reset enable_hashagg;
+reset enable_nestloop;
+reset enable_hashjoin;
--
-- Clean up
--
@@ -2588,6 +3023,31 @@ select t1.*, t2.*, unnamed_join.* from
---+---+---+---+---+---
(0 rows)
+select foo.*, unnamed_join.* from
+ t1 join t2 using (a) as foo, t3 as unnamed_join
+ for update of unnamed_join;
+ a | x | y
+---+---+---
+(0 rows)
+
+select foo.*, unnamed_join.* from
+ t1 join t2 using (a) as foo, t3 as unnamed_join
+ for update of foo;
+ERROR: FOR UPDATE cannot be applied to a join
+LINE 3: for update of foo;
+ ^
+select bar.*, unnamed_join.* from
+ (t1 join t2 using (a) as foo) as bar, t3 as unnamed_join
+ for update of foo;
+ERROR: relation "foo" in FOR UPDATE clause not found in FROM clause
+LINE 3: for update of foo;
+ ^
+select bar.*, unnamed_join.* from
+ (t1 join t2 using (a) as foo) as bar, t3 as unnamed_join
+ for update of bar;
+ERROR: FOR UPDATE cannot be applied to a join
+LINE 3: for update of bar;
+ ^
--
-- regression test for 8.1 merge right join bug
--
@@ -2620,6 +3080,65 @@ select tt1.*, tt2.* from tt2 right join tt1 on tt1.joincol = tt2.joincol;
1 | 11 | 22 | 11
(3 rows)
+reset enable_hashjoin;
+reset enable_nestloop;
+--
+-- regression test for bug #18522 (merge-right-anti-join in inner_unique cases)
+--
+create temp table tbl_ra(a int unique, b int);
+insert into tbl_ra select i, i%100 from generate_series(1,1000)i;
+create index on tbl_ra (b);
+analyze tbl_ra;
+set enable_hashjoin to off;
+set enable_nestloop to off;
+-- ensure we get a merge right anti join
+explain (costs off)
+select * from tbl_ra t1
+where not exists (select 1 from tbl_ra t2 where t2.b = t1.a) and t1.b < 2;
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Result
+ Filter: (COALESCE((count(*)), '0'::bigint) = '0'::bigint)
+ -> Hash Left Join
+ Hash Cond: (t1.a = t2.b)
+ -> Index Scan using tbl_ra_b_idx on tbl_ra t1
+ Index Cond: (b < 2)
+ -> Hash
+ -> HashAggregate
+ Group Key: t2.b
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: t2.b
+ -> Seq Scan on tbl_ra t2
+ Optimizer: GPORCA
+(14 rows)
+
+-- and check we get the expected results
+select * from tbl_ra t1
+where not exists (select 1 from tbl_ra t2 where t2.b = t1.a) and t1.b < 2;
+ a | b
+------+---
+ 100 | 0
+ 101 | 1
+ 200 | 0
+ 201 | 1
+ 300 | 0
+ 301 | 1
+ 400 | 0
+ 401 | 1
+ 500 | 0
+ 501 | 1
+ 600 | 0
+ 601 | 1
+ 700 | 0
+ 701 | 1
+ 800 | 0
+ 801 | 1
+ 900 | 0
+ 901 | 1
+ 1000 | 0
+(19 rows)
+
reset enable_hashjoin;
reset enable_nestloop;
--
@@ -2666,28 +3185,150 @@ create temp table tt3(f1 int, f2 text);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'f1' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
insert into tt3 select x, repeat('xyzzy', 100) from generate_series(1,10000) x;
-create index tt3i on tt3(f1);
analyze tt3;
create temp table tt4(f1 int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'f1' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
insert into tt4 values (0),(1),(9999);
analyze tt4;
+set enable_nestloop to off;
+EXPLAIN (COSTS OFF)
SELECT a.f1
FROM tt4 a
LEFT JOIN (
SELECT b.f1
FROM tt3 b LEFT JOIN tt3 c ON (b.f1 = c.f1)
- WHERE c.f1 IS NULL
+ WHERE COALESCE(c.f1, 0) = 0
) AS d ON (a.f1 = d.f1)
-WHERE d.f1 IS NULL;
+WHERE COALESCE(d.f1, 0) = 0
+ORDER BY 1;
+ QUERY PLAN
+--------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Merge Key: a.f1
+ -> Sort
+ Sort Key: a.f1
+ -> Result
+ Filter: (COALESCE(b.f1, 0) = 0)
+ -> Hash Right Join
+ Hash Cond: (b.f1 = a.f1)
+ -> Result
+ Filter: (COALESCE(c.f1, 0) = 0)
+ -> Hash Left Join
+ Hash Cond: (b.f1 = c.f1)
+ -> Seq Scan on tt3 b
+ -> Hash
+ -> Seq Scan on tt3 c
+ -> Hash
+ -> Seq Scan on tt4 a
+ Optimizer: GPORCA
+(18 rows)
+
+SELECT a.f1
+FROM tt4 a
+LEFT JOIN (
+ SELECT b.f1
+ FROM tt3 b LEFT JOIN tt3 c ON (b.f1 = c.f1)
+ WHERE COALESCE(c.f1, 0) = 0
+) AS d ON (a.f1 = d.f1)
+WHERE COALESCE(d.f1, 0) = 0
+ORDER BY 1;
f1
------
- 9999
0
1
+ 9999
(3 rows)
+reset enable_nestloop;
+--
+-- basic semijoin and antijoin recognition tests
+--
+explain (costs off)
+select a.* from tenk1 a
+where unique1 in (select unique2 from tenk1 b);
+ QUERY PLAN
+----------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Semi Join
+ Hash Cond: (a.unique1 = b.unique2)
+ -> Seq Scan on tenk1 a
+ -> Hash
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: b.unique2
+ -> Seq Scan on tenk1 b
+ Optimizer: GPORCA
+(9 rows)
+
+-- sadly, this is not an antijoin
+explain (costs off)
+select a.* from tenk1 a
+where unique1 not in (select unique2 from tenk1 b);
+ QUERY PLAN
+--------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Left Anti Semi (Not-In) Join
+ Hash Cond: (a.unique1 = b.unique2)
+ -> Seq Scan on tenk1 a
+ -> Hash
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on tenk1 b
+ Optimizer: GPORCA
+(8 rows)
+
+explain (costs off)
+select a.* from tenk1 a
+where exists (select 1 from tenk1 b where a.unique1 = b.unique2);
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Semi Join
+ Hash Cond: (a.unique1 = b.unique2)
+ -> Index Scan using tenk1_unique1 on tenk1 a
+ Index Cond: (unique1 IS NOT NULL)
+ -> Hash
+ -> Result
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: b.unique2
+ -> Seq Scan on tenk1 b
+ Optimizer: GPORCA
+(11 rows)
+
+explain (costs off)
+select a.* from tenk1 a
+where not exists (select 1 from tenk1 b where a.unique1 = b.unique2);
+ QUERY PLAN
+------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Anti Join
+ Hash Cond: (a.unique1 = b.unique2)
+ -> Seq Scan on tenk1 a
+ -> Hash
+ -> Result
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: b.unique2
+ -> Seq Scan on tenk1 b
+ Optimizer: GPORCA
+(10 rows)
+
+explain (costs off)
+select a.* from tenk1 a left join tenk1 b on a.unique1 = b.unique2
+where b.unique2 is null;
+ QUERY PLAN
+------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Result
+ Filter: (b.unique2 IS NULL)
+ -> Hash Left Join
+ Hash Cond: (a.unique1 = b.unique2)
+ -> Seq Scan on tenk1 a
+ -> Hash
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: b.unique2
+ -> Seq Scan on tenk1 b
+ Optimizer: GPORCA
+(11 rows)
+
--
-- regression test for proper handling of outer joins within antijoins
--
@@ -3393,11 +4034,11 @@ select * from
tenk1 join int4_tbl on f1 = twothousand,
q1, q2
where thousand = (q1 + q2);
- QUERY PLAN
----------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- -> Hash Join
- Hash Cond: (tenk1.twothousand = int4_tbl.f1)
+ QUERY PLAN
+--------------------------------------------------------------
+ Hash Join
+ Hash Cond: (tenk1.twothousand = int4_tbl.f1)
+ -> Gather Motion 3:1 (slice1; segments: 3)
-> Nested Loop
Join Filter: true
-> Broadcast Motion 3:3 (slice2; segments: 3)
@@ -3408,10 +4049,10 @@ where thousand = (q1 + q2);
-> Seq Scan on q2
-> Index Scan using tenk1_thous_tenthous on tenk1
Index Cond: (thousand = (q1.q1 + q2.q2))
- -> Hash
- -> Broadcast Motion 3:3 (slice4; segments: 3)
- -> Seq Scan on int4_tbl
- Optimizer: Pivotal Optimizer (GPORCA)
+ -> Hash
+ -> Gather Motion 3:1 (slice4; segments: 3)
+ -> Seq Scan on int4_tbl
+ Optimizer: GPORCA
(17 rows)
--
@@ -3664,6 +4305,84 @@ where b;
0 | t | t
(2 rows)
+-- Test PHV in a semijoin qual, which confused useless-RTE removal (bug #17700)
+explain (verbose, costs off)
+with ctetable as not materialized ( select 1 as f1 )
+select * from ctetable c1
+where f1 in ( select c3.f1 from ctetable c2 full join ctetable c3 on true );
+ QUERY PLAN
+------------------------------------
+ Hash Semi Join
+ Output: (1)
+ Hash Cond: ((1) = (1))
+ -> Result
+ Output: 1
+ -> Hash
+ Output: (1)
+ -> Merge Full Join
+ Output: (1)
+ -> Result
+ Output: 1
+ -> Materialize
+ Output: (1)
+ -> Result
+ Output: 1
+ Optimizer: GPORCA
+(17 rows)
+
+with ctetable as not materialized ( select 1 as f1 )
+select * from ctetable c1
+where f1 in ( select c3.f1 from ctetable c2 full join ctetable c3 on true );
+ f1
+----
+ 1
+(1 row)
+
+-- Test PHV that winds up in a Result node, despite having nonempty nullingrels
+explain (verbose, costs off)
+select table_catalog, table_name
+from int4_tbl t1
+ inner join (int8_tbl t2
+ left join information_schema.column_udt_usage on null)
+ on null;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Result
+ Output: 'regression'::information_schema.sql_identifier, (c.relname)::information_schema.sql_identifier
+ One-Time Filter: false
+ Optimizer: GPORCA
+(5 rows)
+
+-- Test handling of qual pushdown to appendrel members with non-Var outputs
+explain (verbose, costs off)
+select * from int4_tbl left join (
+ select text 'foo' union all select text 'bar'
+) ss(x) on true
+where ss.x is null;
+ QUERY PLAN
+----------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Output: f1, ('foo'::text)
+ -> Result
+ Output: f1, ('foo'::text)
+ Filter: (('foo'::text) IS NULL)
+ -> Nested Loop Left Join
+ Output: f1, ('foo'::text)
+ Join Filter: true
+ -> Seq Scan on public.int4_tbl
+ Output: f1
+ -> Append
+ -> Materialize
+ Output: ('foo'::text)
+ -> Result
+ Output: 'foo'::text
+ -> Materialize
+ Output: ('bar'::text)
+ -> Result
+ Output: 'bar'::text
+ Optimizer: GPORCA
+(21 rows)
+
--
-- test inlining of immutable functions
--
@@ -3894,6 +4613,37 @@ select * from mki4(42);
drop function mki8(bigint, bigint);
drop function mki4(int);
+-- test const-folding of a whole-row Var into a per-field Var
+-- (need to inline a function to reach this case, else parser does it)
+create function f_field_select(t onek) returns int4 as
+$$ select t.unique2; $$ language sql immutable;
+explain (verbose, costs off)
+select (t2.*).unique1, f_field_select(t2) from tenk1 t1
+ left join onek t2 on t1.unique1 = t2.unique1
+ left join int8_tbl t3 on true;
+ QUERY PLAN
+--------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Output: t2.unique1, t2.unique2
+ -> Hash Left Join
+ Output: t2.unique1, t2.unique2
+ Hash Cond: (t1.unique1 = t2.unique1)
+ -> Nested Loop Left Join
+ Output: t1.unique1
+ Join Filter: true
+ -> Seq Scan on public.tenk1 t1
+ Output: t1.unique1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on public.int8_tbl t3
+ -> Hash
+ Output: t2.unique1, t2.unique2
+ -> Seq Scan on public.onek t2
+ Output: t2.unique1, t2.unique2
+ Optimizer: GPORCA
+(19 rows)
+
+drop function f_field_select(t onek);
--
-- test extraction of restriction OR clauses from join OR clause
-- (we used to only do this for indexable clauses)
@@ -4071,18 +4821,18 @@ select count(*) from
Join Filter: (a.unique2 = b.unique1)
-> Redistribute Motion 3:3 (slice2; segments: 3)
Hash Key: a.thousand
- -> Nested Loop
- Join Filter: true
- -> Redistribute Motion 3:3 (slice3; segments: 3)
- Hash Key: b.unique2
- -> Nested Loop
- Join Filter: true
- -> Broadcast Motion 3:3 (slice4; segments: 3)
- -> Seq Scan on int4_tbl
- -> Index Scan using tenk1_thous_tenthous on tenk1 b
- Index Cond: (thousand = int4_tbl.f1)
- -> Index Scan using tenk1_unique1 on tenk1 a
- Index Cond: (unique1 = b.unique2)
+ -> Hash Join
+ Hash Cond: (b.thousand = int4_tbl.f1)
+ -> Hash Join
+ Hash Cond: (a.unique1 = b.unique2)
+ -> Seq Scan on tenk1 a
+ -> Hash
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: b.unique2
+ -> Seq Scan on tenk1 b
+ -> Hash
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on int4_tbl
-> Hash
-> Redistribute Motion 3:3 (slice5; segments: 3)
Hash Key: c.thousand
@@ -4121,18 +4871,18 @@ select b.unique1 from
Join Filter: (b.unique1 = 42)
-> Redistribute Motion 3:3 (slice3; segments: 3)
Hash Key: a.thousand
- -> Nested Loop
- Join Filter: true
- -> Redistribute Motion 3:3 (slice4; segments: 3)
- Hash Key: b.unique2
- -> Nested Loop
- Join Filter: true
- -> Broadcast Motion 3:3 (slice5; segments: 3)
- -> Seq Scan on int4_tbl i1
- -> Index Scan using tenk1_thous_tenthous on tenk1 b
- Index Cond: (thousand = i1.f1)
- -> Index Scan using tenk1_unique1 on tenk1 a
- Index Cond: (unique1 = b.unique2)
+ -> Hash Join
+ Hash Cond: (b.thousand = i1.f1)
+ -> Hash Join
+ Hash Cond: (b.unique2 = a.unique1)
+ -> Redistribute Motion 3:3 (slice4; segments: 3)
+ Hash Key: b.unique2
+ -> Seq Scan on tenk1 b
+ -> Hash
+ -> Seq Scan on tenk1 a
+ -> Hash
+ -> Broadcast Motion 3:3 (slice5; segments: 3)
+ -> Seq Scan on int4_tbl i1
-> Hash
-> Redistribute Motion 3:3 (slice6; segments: 3)
Hash Key: c.thousand
@@ -4320,6 +5070,44 @@ select a.unique1, b.unique1, c.unique1, coalesce(b.twothousand, a.twothousand)
---------+---------+---------+----------
(0 rows)
+-- related case
+explain (costs off)
+select * from int8_tbl t1 left join int8_tbl t2 on t1.q2 = t2.q1,
+ lateral (select * from int8_tbl t3 where t2.q1 = t2.q2) ss;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ -> Hash Left Join
+ Hash Cond: (t1.q2 = t2.q1)
+ Filter: (t2.q1 = t2.q2)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: t1.q2
+ -> Seq Scan on int8_tbl t1
+ -> Hash
+ -> Seq Scan on int8_tbl t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int8_tbl t3
+ Optimizer: GPORCA
+(14 rows)
+
+select * from int8_tbl t1 left join int8_tbl t2 on t1.q2 = t2.q1,
+ lateral (select * from int8_tbl t3 where t2.q1 = t2.q2) ss;
+ q1 | q2 | q1 | q2 | q1 | q2
+------------------+------------------+------------------+------------------+------------------+-------------------
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 456
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 456
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789
+(10 rows)
+
--
-- check handling of join aliases when flattening multiple levels of subquery
--
@@ -4382,6 +5170,67 @@ using (join_key);
1 | |
(2 rows)
+--
+-- check handling of a variable-free join alias
+--
+explain (verbose, costs off)
+select * from
+int4_tbl i0 left join
+( (select *, 123 as x from int4_tbl i1) ss1
+ left join
+ (select *, q2 as x from int8_tbl i2) ss2
+ using (x)
+) ss0
+on (i0.f1 = ss0.f1)
+order by i0.f1, x;
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Output: i0.f1, (((123))::bigint), i1.f1, i2.q1, i2.q2
+ Merge Key: i0.f1, (((123))::bigint)
+ -> Sort
+ Output: i0.f1, (((123))::bigint), i1.f1, i2.q1, i2.q2
+ Sort Key: i0.f1, (((123))::bigint)
+ -> Hash Left Join
+ Output: i0.f1, (123), i1.f1, i2.q1, i2.q2
+ Hash Cond: (i0.f1 = i1.f1)
+ -> Seq Scan on public.int4_tbl i0
+ Output: i0.f1
+ -> Hash
+ Output: i1.f1, (123), i2.q1, i2.q2
+ -> Hash Left Join
+ Output: i1.f1, (123), i2.q1, i2.q2
+ Hash Cond: (((123))::bigint = i2.q2)
+ -> Seq Scan on public.int4_tbl i1
+ Output: 123, i1.f1
+ -> Hash
+ Output: i2.q1, i2.q2
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ Output: i2.q1, i2.q2
+ -> Seq Scan on public.int8_tbl i2
+ Output: i2.q1, i2.q2
+ Filter: (i2.q2 = 123)
+ Optimizer: GPORCA
+(27 rows)
+
+select * from
+int4_tbl i0 left join
+( (select *, 123 as x from int4_tbl i1) ss1
+ left join
+ (select *, q2 as x from int8_tbl i2) ss2
+ using (x)
+) ss0
+on (i0.f1 = ss0.f1)
+order by i0.f1, x;
+ f1 | x | f1 | q1 | q2
+-------------+-----+-------------+------------------+-----
+ 0 | 123 | 0 | 4567890123456789 | 123
+ 123456 | 123 | 123456 | 4567890123456789 | 123
+ -123456 | 123 | -123456 | 4567890123456789 | 123
+ 2147483647 | 123 | 2147483647 | 4567890123456789 | 123
+ -2147483647 | 123 | -2147483647 | 4567890123456789 | 123
+(5 rows)
+
--
-- test successful handling of nested outer joins with degenerate join quals
--
@@ -4692,6 +5541,62 @@ select * from
doh! | 123 | 456 | hi de ho neighbor |
(2 rows)
+-- check handling of a variable-free qual for a non-commutable outer join
+explain (costs off)
+select nspname
+from (select 1 as x) ss1
+left join
+( select n.nspname, c.relname
+ from pg_class c left join pg_namespace n on n.oid = c.relnamespace
+ where c.relkind = 'r'
+) ss2 on false;
+ QUERY PLAN
+-------------------------------
+ Nested Loop Left Join
+ Join Filter: false
+ -> Result
+ -> Result
+ One-Time Filter: false
+ Optimizer: GPORCA
+(6 rows)
+
+-- check handling of apparently-commutable outer joins with non-commutable
+-- joins between them
+explain (costs off)
+select 1 from
+ int4_tbl i4
+ left join int8_tbl i8 on i4.f1 is not null
+ left join (select 1 as a) ss1 on null
+ join int4_tbl i42 on ss1.a is null or i8.q1 <> i8.q2
+ right join (select 2 as b) ss2
+ on ss2.b < i4.f1;
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ Nested Loop Left Join
+ Join Filter: ((2) < i4.f1)
+ -> Result
+ -> Materialize
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ Join Filter: true
+ -> Result
+ Filter: (((NULL::integer) IS NULL) OR (i8.q1 <> i8.q2))
+ -> Nested Loop Left Join
+ Join Filter: (NOT (i4.f1 IS NULL))
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int4_tbl i4
+ -> Result
+ One-Time Filter: false
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int8_tbl i8
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int4_tbl i42
+ Optimizer: GPORCA
+(23 rows)
+
--
-- test for appropriate join order in the presence of lateral references
--
@@ -4750,6 +5655,57 @@ select 1 from
where tt1.f1 = ss1.c0;
*/
--end_ignore
+explain (verbose, costs off)
+select 1 from
+ int4_tbl as i4
+ inner join
+ ((select 42 as n from int4_tbl x1 left join int8_tbl x2 on f1 = q1) as ss1
+ right join (select 1 as z) as ss2 on true)
+ on false,
+ lateral (select i4.f1, ss1.n from int8_tbl as i8 limit 1) as ss3;
+ QUERY PLAN
+-------------------------
+ Result
+ Output: 1
+ One-Time Filter: false
+ Optimizer: GPORCA
+(5 rows)
+
+select 1 from
+ int4_tbl as i4
+ inner join
+ ((select 42 as n from int4_tbl x1 left join int8_tbl x2 on f1 = q1) as ss1
+ right join (select 1 as z) as ss2 on true)
+ on false,
+ lateral (select i4.f1, ss1.n from int8_tbl as i8 limit 1) as ss3;
+ ?column?
+----------
+(0 rows)
+
+--
+-- check a case where we formerly generated invalid parameterized paths
+--
+begin;
+create temp table t (a int unique);
+explain (costs off)
+select 1 from t t1
+ join lateral (select t1.a from (select 1) foo offset 0) as s1 on true
+ join
+ (select 1 from t t2
+ inner join (t t3
+ left join (t t4 left join t t5 on t4.a = 1)
+ on t3.a = t4.a)
+ on false
+ where t3.a = coalesce(t5.a,1)) as s2
+ on true;
+ QUERY PLAN
+-------------------------
+ Result
+ One-Time Filter: false
+ Optimizer: GPORCA
+(3 rows)
+
+rollback;
--
-- check a case in which a PlaceHolderVar forces join order
--
@@ -4952,6 +5908,154 @@ select a.q2, b.q1
reset enable_hashjoin;
reset enable_nestloop;
reset enable_mergejoin;
+--
+-- test join strength reduction with a SubPlan providing the proof
+--
+explain (costs off)
+select a.unique1, b.unique2
+ from onek a left join onek b on a.unique1 = b.unique2
+ where b.unique2 = any (select q1 from int8_tbl c where c.q1 < b.unique1);
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Semi Join
+ Hash Cond: ((b.unique2)::bigint = c.q1)
+ Join Filter: (c.q1 < b.unique1)
+ -> Hash Left Join
+ Hash Cond: (a.unique1 = b.unique2)
+ -> Seq Scan on onek a
+ -> Hash
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: b.unique2
+ -> Seq Scan on onek b
+ -> Hash
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int8_tbl c
+ Optimizer: GPORCA
+(15 rows)
+
+select a.unique1, b.unique2
+ from onek a left join onek b on a.unique1 = b.unique2
+ where b.unique2 = any (select q1 from int8_tbl c where c.q1 < b.unique1);
+ unique1 | unique2
+---------+---------
+ 123 | 123
+(1 row)
+
+--
+-- test full-join strength reduction
+--
+explain (costs off)
+select a.unique1, b.unique2
+ from onek a full join onek b on a.unique1 = b.unique2
+ where a.unique1 = 42;
+ QUERY PLAN
+---------------------------------------------------------------
+ Hash Left Join
+ Hash Cond: (a.unique1 = b.unique2)
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Index Only Scan using onek_unique1 on onek a
+ Index Cond: (unique1 = 42)
+ -> Hash
+ -> Gather Motion 3:1 (slice2; segments: 3)
+ -> Index Only Scan using onek_unique2 on onek b
+ Index Cond: (unique2 = 42)
+ Optimizer: GPORCA
+(10 rows)
+
+select a.unique1, b.unique2
+ from onek a full join onek b on a.unique1 = b.unique2
+ where a.unique1 = 42;
+ unique1 | unique2
+---------+---------
+ 42 | 42
+(1 row)
+
+explain (costs off)
+select a.unique1, b.unique2
+ from onek a full join onek b on a.unique1 = b.unique2
+ where b.unique2 = 43;
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Result
+ Filter: (b.unique2 = 43)
+ -> Hash Full Join
+ Hash Cond: (a.unique1 = b.unique2)
+ -> Seq Scan on onek a
+ -> Hash
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: b.unique2
+ -> Seq Scan on onek b
+ Optimizer: GPORCA
+(11 rows)
+
+select a.unique1, b.unique2
+ from onek a full join onek b on a.unique1 = b.unique2
+ where b.unique2 = 43;
+ unique1 | unique2
+---------+---------
+ 43 | 43
+(1 row)
+
+explain (costs off)
+select a.unique1, b.unique2
+ from onek a full join onek b on a.unique1 = b.unique2
+ where a.unique1 = 42 and b.unique2 = 42;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ Join Filter: true
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: b.unique2
+ -> Index Only Scan using onek_unique2 on onek b
+ Index Cond: (unique2 = 42)
+ -> Index Only Scan using onek_unique1 on onek a
+ Index Cond: ((unique1 = b.unique2) AND (unique1 = 42))
+ Optimizer: GPORCA
+(10 rows)
+
+select a.unique1, b.unique2
+ from onek a full join onek b on a.unique1 = b.unique2
+ where a.unique1 = 42 and b.unique2 = 42;
+ unique1 | unique2
+---------+---------
+ 42 | 42
+(1 row)
+
+--
+-- test result-RTE removal underneath a full join
+--
+explain (costs off)
+select * from
+ (select * from int8_tbl i81 join (values(123,2)) v(v1,v2) on q2=v1) ss1
+full join
+ (select * from (values(456,2)) w(v1,v2) join int8_tbl i82 on q2=v1) ss2
+on true;
+ QUERY PLAN
+-----------------------------------------------------
+ Merge Full Join
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on int8_tbl i81
+ Filter: (q2 = 123)
+ -> Materialize
+ -> Gather Motion 3:1 (slice2; segments: 3)
+ -> Seq Scan on int8_tbl i82
+ Filter: (q2 = 456)
+ Optimizer: GPORCA
+(9 rows)
+
+select * from
+ (select * from int8_tbl i81 join (values(123,2)) v(v1,v2) on q2=v1) ss1
+full join
+ (select * from (values(456,2)) w(v1,v2) join int8_tbl i82 on q2=v1) ss2
+on true;
+ q1 | q2 | v1 | v2 | v1 | v2 | q1 | q2
+------------------+-----+-----+----+-----+----+-----+-----
+ 4567890123456789 | 123 | 123 | 2 | 456 | 2 | 123 | 456
+(1 row)
+
--
-- test join removal
--
@@ -4979,39 +6083,297 @@ explain (costs off) SELECT a.* FROM a LEFT JOIN b ON a.b_id = b.id;
Optimizer: Pivotal Optimizer (GPORCA)
(3 rows)
-explain (costs off) SELECT b.* FROM b LEFT JOIN c ON b.c_id = c.id;
- QUERY PLAN
-------------------------------------------
+explain (costs off) SELECT b.* FROM b LEFT JOIN c ON b.c_id = c.id;
+ QUERY PLAN
+------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on b
+ Optimizer: Pivotal Optimizer (GPORCA)
+(3 rows)
+
+explain (costs off)
+ SELECT a.* FROM a LEFT JOIN (b left join c on b.c_id = c.id)
+ ON (a.b_id = b.id);
+ QUERY PLAN
+------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on a
+ Optimizer: Pivotal Optimizer (GPORCA)
+(3 rows)
+
+-- check optimization of outer join within another special join
+explain (costs off)
+select id from a where id in (
+ select b.id from b left join c on b.id = c.id
+);
+ QUERY PLAN
+------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ Join Filter: true
+ -> Seq Scan on a
+ -> Index Scan using b_pkey on b
+ Index Cond: (id = a.id)
+ Optimizer: GPORCA
+(7 rows)
+
+-- check optimization with oddly-nested outer joins
+explain (costs off)
+select a1.id from
+ (a a1 left join a a2 on true)
+ left join
+ (a a3 left join a a4 on a3.id = a4.id)
+ on a2.id = a3.id;
+ QUERY PLAN
+--------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on a a1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on a a2
+ Optimizer: GPORCA
+(8 rows)
+
+explain (costs off)
+select a1.id from
+ (a a1 left join a a2 on a1.id = a2.id)
+ left join
+ (a a3 left join a a4 on a3.id = a4.id)
+ on a2.id = a3.id;
+ QUERY PLAN
+--------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on a a1
+ -> Index Scan using a_pkey on a a2
+ Index Cond: (id = a1.id)
+ Optimizer: GPORCA
+(7 rows)
+
+explain (costs off)
+select 1 from a t1
+ left join a t2 on true
+ inner join a t3 on true
+ left join a t4 on t2.id = t4.id and t2.id = t3.id;
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ Result
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: t2.id
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Nested Loop
+ Join Filter: true
+ -> Seq Scan on a t3
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on a t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on a t2
+ -> Index Only Scan using a_pkey on a t4
+ Index Cond: (id = t2.id)
+ Filter: (t2.id = t3.id)
+ Optimizer: GPORCA
+(21 rows)
+
+-- another example (bug #17781)
+explain (costs off)
+select ss1.f1
+from int4_tbl as t1
+ left join (int4_tbl as t2
+ right join int4_tbl as t3 on null
+ left join (int4_tbl as t4
+ right join int8_tbl as t5 on null)
+ on t2.f1 = t4.f1
+ left join ((select null as f1 from int4_tbl as t6) as ss1
+ inner join int8_tbl as t7 on null)
+ on t5.q1 = t7.q2)
+ on false;
+ QUERY PLAN
+-----------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int4_tbl t1
+ -> Result
+ One-Time Filter: false
+ Optimizer: GPORCA
+(7 rows)
+
+-- variant with Var rather than PHV coming from t6
+explain (costs off)
+select ss1.f1
+from int4_tbl as t1
+ left join (int4_tbl as t2
+ right join int4_tbl as t3 on null
+ left join (int4_tbl as t4
+ right join int8_tbl as t5 on null)
+ on t2.f1 = t4.f1
+ left join ((select f1 from int4_tbl as t6) as ss1
+ inner join int8_tbl as t7 on null)
+ on t5.q1 = t7.q2)
+ on false;
+ QUERY PLAN
+-----------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int4_tbl t1
+ -> Result
+ One-Time Filter: false
+ Optimizer: GPORCA
+(7 rows)
+
+-- per further discussion of bug #17781
+explain (costs off)
+select ss1.x
+from (select f1/2 as x from int4_tbl i4 left join a on a.id = i4.f1) ss1
+ right join int8_tbl i8 on true
+where current_user is not null; -- this is to add a Result node
+ QUERY PLAN
+--------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on int8_tbl i8
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int4_tbl i4
+ Optimizer: GPORCA
+(8 rows)
+
+-- and further discussion of bug #17781
+explain (costs off)
+select *
+from int8_tbl t1
+ left join (int8_tbl t2 left join onek t3 on t2.q1 > t3.unique1)
+ on t1.q2 = t2.q2
+ left join onek t4
+ on t2.q2 < t3.unique2;
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: (t2.q2 < t3.unique2)
+ -> Hash Right Join
+ Hash Cond: (t2.q2 = t1.q2)
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: t2.q2
+ -> Nested Loop Left Join
+ Join Filter: (t2.q1 > t3.unique1)
+ -> Seq Scan on int8_tbl t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on onek t3
+ -> Hash
+ -> Redistribute Motion 3:3 (slice5; segments: 3)
+ Hash Key: t1.q2
+ -> Seq Scan on int8_tbl t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on onek t4
+ Optimizer: GPORCA
+(21 rows)
+
+-- More tests of correct placement of pseudoconstant quals
+-- simple constant-false condition
+explain (costs off)
+select * from int8_tbl t1 left join
+ (int8_tbl t2 inner join int8_tbl t3 on false
+ left join int8_tbl t4 on t2.q2 = t4.q2)
+on t1.q1 = t2.q1;
+ QUERY PLAN
+-------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
- -> Seq Scan on b
- Optimizer: Pivotal Optimizer (GPORCA)
-(3 rows)
+ -> Hash Left Join
+ Hash Cond: (q1 = (NULL::bigint))
+ -> Seq Scan on int8_tbl t1
+ -> Hash
+ -> Result
+ One-Time Filter: false
+ Optimizer: GPORCA
+(8 rows)
+-- deduce constant-false from an EquivalenceClass
explain (costs off)
- SELECT a.* FROM a LEFT JOIN (b left join c on b.c_id = c.id)
- ON (a.b_id = b.id);
- QUERY PLAN
-------------------------------------------
+select * from int8_tbl t1 left join
+ (int8_tbl t2 inner join int8_tbl t3 on (t2.q1-t3.q2) = 0 and (t2.q1-t3.q2) = 1
+ left join int8_tbl t4 on t2.q2 = t4.q2)
+on t1.q1 = t2.q1;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
- -> Seq Scan on a
- Optimizer: Pivotal Optimizer (GPORCA)
-(3 rows)
+ -> Hash Right Join
+ Hash Cond: (t2.q1 = t1.q1)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: t2.q1
+ -> Hash Left Join
+ Hash Cond: (t2.q2 = t4.q2)
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: t2.q2
+ -> Nested Loop
+ Join Filter: (((t2.q1 - t3.q2) = 0) AND ((t2.q1 - t3.q2) = 1))
+ -> Seq Scan on int8_tbl t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on int8_tbl t3
+ -> Hash
+ -> Redistribute Motion 3:3 (slice5; segments: 3)
+ Hash Key: t4.q2
+ -> Seq Scan on int8_tbl t4
+ -> Hash
+ -> Seq Scan on int8_tbl t1
+ Optimizer: GPORCA
+(22 rows)
--- check optimization of outer join within another special join
+-- pseudoconstant based on an outer-level Param
explain (costs off)
-select id from a where id in (
- select b.id from b left join c on b.id = c.id
-);
- QUERY PLAN
-------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- -> Nested Loop
- Join Filter: true
- -> Seq Scan on a
- -> Index Scan using b_pkey on b
- Index Cond: (id = a.id)
- Optimizer: Pivotal Optimizer (GPORCA)
-(7 rows)
+select exists(
+ select * from int8_tbl t1 left join
+ (int8_tbl t2 inner join int8_tbl t3 on x0.f1 = 1
+ left join int8_tbl t4 on t2.q2 = t4.q2)
+ on t1.q1 = t2.q1
+) from int4_tbl x0;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------
+ Result
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on int4_tbl x0
+ SubPlan 1
+ -> Aggregate
+ -> Nested Loop Left Join
+ Join Filter: (t1.q1 = t2.q1)
+ -> Materialize
+ -> Gather Motion 3:1 (slice6; segments: 3)
+ -> Seq Scan on int8_tbl t1
+ -> Materialize
+ -> Result
+ One-Time Filter: (x0.f1 = 1)
+ -> Materialize
+ -> Gather Motion 3:1 (slice2; segments: 3)
+ -> Hash Left Join
+ Hash Cond: (t2.q2 = t4.q2)
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: t2.q2
+ -> Nested Loop
+ Join Filter: true
+ -> Seq Scan on int8_tbl t2
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice4; segments: 3)
+ -> Seq Scan on int8_tbl t3
+ -> Hash
+ -> Redistribute Motion 3:3 (slice5; segments: 3)
+ Hash Key: t4.q2
+ -> Seq Scan on int8_tbl t4
+ Optimizer: GPORCA
+(30 rows)
-- check that join removal works for a left join when joining a subquery
-- that is guaranteed to be unique by its GROUP BY clause
@@ -5066,9 +6428,32 @@ select d.* from d left join (select distinct * from b) s
-> Seq Scan on d
-> Index Scan using b_pkey on b
Index Cond: (id = d.a)
- Optimizer: Pivotal Optimizer (GPORCA)
+ Optimizer: GPORCA
(7 rows)
+-- join removal is not possible here
+explain (costs off)
+select 1 from a t1
+ left join (a t2 left join a t3 on t2.id = 1) on t2.id = 1;
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ Result
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on a t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Nested Loop Left Join
+ Join Filter: (t2.id = 1)
+ -> Index Scan using a_pkey on a t2
+ Index Cond: (id = 1)
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on a t3
+ Optimizer: GPORCA
+(15 rows)
+
-- check join removal works when uniqueness of the join condition is enforced
-- by a UNION
explain (costs off)
@@ -5103,9 +6488,42 @@ select 1 from (select a.id FROM a left join b on a.b_id = b.id) q,
-> Seq Scan on a
-> Function Scan on generate_series gs
Filter: (a.id = i)
- Optimizer: Postgres query optimizer
+ Optimizer: GPORCA
(6 rows)
+-- check join removal within RHS of an outer join
+explain (costs off)
+select c.id, ss.a from c
+ left join (select d.a from onerow, d left join b on d.a = b.id) ss
+ on c.id = ss.a;
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Left Join
+ Hash Cond: (c.id = d.a)
+ -> Seq Scan on c
+ -> Hash
+ -> Nested Loop
+ Join Filter: true
+ -> Seq Scan on d
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on onerow
+ Optimizer: GPORCA
+(12 rows)
+
+CREATE TEMP TABLE parted_b (id int PRIMARY KEY) partition by range(id);
+CREATE TEMP TABLE parted_b1 partition of parted_b for values from (0) to (10);
+-- test join removals on a partitioned table
+explain (costs off)
+select a.* from a left join parted_b pb on a.b_id = pb.id;
+ QUERY PLAN
+-----------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on a
+ Optimizer: GPORCA
+(3 rows)
+
rollback;
create temp table parent (k int primary key, pd int);
create temp table child (k int unique, cd int);
@@ -5230,6 +6648,56 @@ SELECT * FROM
1 | 4567890123456789 | -4567890123456789 | 4567890123456789
(5 rows)
+-- join removal bug #17769: can't remove if there's a pushed-down reference
+EXPLAIN (COSTS OFF)
+SELECT q2 FROM
+ (SELECT *
+ FROM int8_tbl LEFT JOIN innertab ON q2 = id) ss
+ WHERE COALESCE(dat1, 0) = q1;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Result
+ Filter: (COALESCE(innertab.dat1, '0'::bigint) = int8_tbl.q1)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: int8_tbl.q2
+ -> Seq Scan on int8_tbl
+ -> Index Scan using innertab_pkey on innertab
+ Index Cond: (id = int8_tbl.q2)
+ Optimizer: GPORCA
+(11 rows)
+
+-- join removal bug #17773: otherwise-removable PHV appears in a qual condition
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT q2 FROM
+ (SELECT q2, 'constant'::text AS x
+ FROM int8_tbl LEFT JOIN innertab ON q2 = id) ss
+ RIGHT JOIN int4_tbl ON NULL
+ WHERE x >= x;
+ QUERY PLAN
+-------------------------
+ Result
+ Output: NULL::bigint
+ One-Time Filter: false
+ Optimizer: GPORCA
+(5 rows)
+
+-- join removal bug #17786: check that OR conditions are cleaned up
+EXPLAIN (COSTS OFF)
+SELECT f1, x
+FROM int4_tbl
+ JOIN ((SELECT 42 AS x FROM int8_tbl LEFT JOIN innertab ON q1 = id) AS ss1
+ RIGHT JOIN tenk1 ON NULL)
+ ON tenk1.unique1 = ss1.x OR tenk1.unique2 = ss1.x;
+ QUERY PLAN
+-------------------------
+ Result
+ One-Time Filter: false
+ Optimizer: GPORCA
+(3 rows)
+
rollback;
-- another join removal bug: we must clean up correctly when removing a PHV
begin;
@@ -5269,19 +6737,18 @@ where ss.stringu2 !~* ss.case1;
Gather Motion 3:1 (slice1; segments: 3)
-> Hash Join
Hash Cond: ((CASE t1.ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END) = t0.f1)
- -> Redistribute Motion 3:3 (slice2; segments: 3)
- Hash Key: (CASE t1.ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END)
- -> Nested Loop
- Join Filter: true
- -> Broadcast Motion 3:3 (slice3; segments: 3)
- -> Seq Scan on int4_tbl i4
- -> Index Scan using tenk1_unique2 on tenk1 t1
- Index Cond: (unique2 = i4.f1)
- Filter: (stringu2 !~* CASE ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END)
+ -> Nested Loop
+ Join Filter: true
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int4_tbl i4
+ -> Index Scan using tenk1_unique2 on tenk1 t1
+ Index Cond: (unique2 = i4.f1)
+ Filter: (stringu2 !~* CASE ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END)
-> Hash
- -> Seq Scan on text_tbl t0
- Optimizer: Pivotal Optimizer (GPORCA)
-(15 rows)
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on text_tbl t0
+ Optimizer: GPORCA
+(14 rows)
select t0.*
from
@@ -5299,6 +6766,115 @@ where ss.stringu2 !~* ss.case1;
doh!
(1 row)
+rollback;
+-- another join removal bug: we must clean up EquivalenceClasses too
+begin;
+create temp table t (a int unique);
+insert into t values (1);
+explain (costs off)
+select 1
+from t t1
+ left join (select 2 as c
+ from t t2 left join t t3 on t2.a = t3.a) s
+ on true
+where t1.a = s.c;
+ QUERY PLAN
+--------------------------------------------------------------------
+ Result
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Hash Join
+ Hash Cond: ((2) = t1.a)
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on t t2
+ Filter: (2 = 2)
+ -> Index Scan using t_a_key on t t3
+ Index Cond: (a = t2.a)
+ -> Hash
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Index Scan using t_a_key on t t1
+ Index Cond: (a = 2)
+ Optimizer: GPORCA
+(15 rows)
+
+select 1
+from t t1
+ left join (select 2 as c
+ from t t2 left join t t3 on t2.a = t3.a) s
+ on true
+where t1.a = s.c;
+ ?column?
+----------
+(0 rows)
+
+rollback;
+-- test cases where we can remove a join, but not a PHV computed at it
+begin;
+create temp table t (a int unique, b int);
+insert into t values (1,1), (2,2);
+explain (costs off)
+select 1
+from t t1
+ left join (select t2.a, 1 as c
+ from t t2 left join t t3 on t2.a = t3.a) s
+ on true
+ left join t t4 on true
+where s.a < s.c;
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Result
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ Join Filter: true
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Result
+ Filter: (t2.a < (1))
+ -> Seq Scan on t t2
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on t t1
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on t t4
+ Optimizer: GPORCA
+(15 rows)
+
+explain (costs off)
+select t1.a, s.*
+from t t1
+ left join lateral (select t2.a, coalesce(t1.a, 1) as c
+ from t t2 left join t t3 on t2.a = t3.a) s
+ on true
+ left join t t4 on true
+where s.a < s.c;
+ QUERY PLAN
+--------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop Left Join
+ -> Nested Loop
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on t t1
+ -> Seq Scan on t t2
+ Filter: (a < COALESCE(t1.a, 1))
+ -> Materialize
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on t t4
+ Optimizer: GPORCA
+(11 rows)
+
+select t1.a, s.*
+from t t1
+ left join lateral (select t2.a, coalesce(t1.a, 1) as c
+ from t t2 left join t t3 on t2.a = t3.a) s
+ on true
+ left join t t4 on true
+where s.a < s.c;
+ a | a | c
+---+---+---
+ 2 | 1 | 2
+ 2 | 1 | 2
+(2 rows)
+
rollback;
-- test case to expose miscomputation of required relid set for a PHV
explain (verbose, costs off)
@@ -5367,16 +6943,16 @@ where ss.a = ss.phv and f1 = 0;
Gather Motion 3:1 (slice1; segments: 3)
-> Nested Loop
Join Filter: true
- -> Result
- Filter: (((12) = 12) AND (parttbl.a = (12)))
- -> Dynamic Index Scan on parttbl_pkey on parttbl
- Index Cond: (a = 12)
- Number of partitions to scan: 1 (out of 1)
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Result
+ Filter: (((12) = 12) AND (parttbl.a = (12)))
+ -> Dynamic Index Scan on parttbl_pkey on parttbl
+ Index Cond: (a = 12)
+ Number of partitions to scan: 1 (out of 1)
-> Materialize
- -> Broadcast Motion 3:3 (slice2; segments: 3)
- -> Seq Scan on int4_tbl
- Filter: (f1 = 0)
- Optimizer: Pivotal Optimizer (GPORCA)
+ -> Seq Scan on int4_tbl
+ Filter: (f1 = 0)
+ Optimizer: GPORCA
(13 rows)
reset optimizer_enable_dynamicindexonlyscan;
@@ -5400,7 +6976,7 @@ select * from
ERROR: invalid reference to FROM-clause entry for table "y"
LINE 2: ...bl x join (int4_tbl x cross join int4_tbl y) j on q1 = y.f1;
^
-HINT: There is an entry for table "y", but it cannot be referenced from this part of the query.
+DETAIL: There is an entry for table "y", but it cannot be referenced from this part of the query.
select * from
int8_tbl x join (int4_tbl x cross join int4_tbl y(ff)) j on q1 = f1; -- ok
q1 | q2 | f1 | ff
@@ -5428,6 +7004,13 @@ ERROR: column "uunique1" does not exist
LINE 1: select uunique1 from
^
HINT: Perhaps you meant to reference the column "t1.unique1" or the column "t2.unique1".
+select ctid from
+ tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, need qualification
+ERROR: column "ctid" does not exist
+LINE 1: select ctid from
+ ^
+DETAIL: There are columns named "ctid", but they are in tables that cannot be referenced from this part of the query.
+HINT: Try using a table-qualified name.
--
-- Take care to reference the correct RTE
--
@@ -5439,6 +7022,19 @@ select atts.relid::regclass, s.* from pg_stats s join
ERROR: column atts.relid does not exist
LINE 1: select atts.relid::regclass, s.* from pg_stats s join
^
+-- Test bug in rangetable flattening
+explain (verbose, costs off)
+select 1 from
+ (select * from int8_tbl where q1 <> (select 42) offset 0) ss
+where false;
+ QUERY PLAN
+-------------------------
+ Result
+ Output: NULL::integer
+ One-Time Filter: false
+ Optimizer: GPORCA
+(5 rows)
+
--
-- Test LATERAL
--
@@ -6175,10 +7771,10 @@ select * from
-> Nested Loop
Output: c.q1, c.q2, a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint)), d.q1, (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)), ((COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)))
-> Hash Right Join
- Output: c.q1, c.q2, a.q1, a.q2, b.q1, d.q1, (COALESCE(b.q2, '42'::bigint)), (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))
+ Output: c.q1, c.q2, a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint)), d.q1, (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))
Hash Cond: (d.q1 = c.q2)
-> Nested Loop
- Output: a.q1, a.q2, b.q1, d.q1, (COALESCE(b.q2, '42'::bigint)), (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))
+ Output: a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint)), d.q1, (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))
-> Broadcast Motion 3:3 (slice2; segments: 3)
Output: a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint))
-> Hash Left Join
@@ -6206,8 +7802,36 @@ select * from
Output: ((COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)))
-> Result
Output: (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))
- Optimizer: Postgres query optimizer
-(37 rows)
+ Optimizer: GPORCA
+(38 rows)
+
+-- another case requiring nested PlaceHolderVars
+explain (verbose, costs off)
+select * from
+ (select 0 as val0) as ss0
+ left join (select 1 as val) as ss1 on true
+ left join lateral (select ss1.val as val_filtered where false) as ss2 on true;
+ QUERY PLAN
+-------------------------------
+ Nested Loop Left Join
+ Output: 0, (1), ((1))
+ Join Filter: false
+ -> Result
+ Output: 1
+ -> Result
+ Output: (1)
+ One-Time Filter: false
+ Optimizer: GPORCA
+(10 rows)
+
+select * from
+ (select 0 as val0) as ss0
+ left join (select 1 as val) as ss1 on true
+ left join lateral (select ss1.val as val_filtered where false) as ss2 on true;
+ val0 | val | val_filtered
+------+-----+--------------
+ 0 | 1 |
+(1 row)
-- case that breaks the old ph_may_need optimization
explain (verbose, costs off)
@@ -6298,8 +7922,41 @@ select * from
Output: (3)
-> Result
Output: 3
- Optimizer: Postgres query optimizer
-(16 rows)
+ Optimizer: GPORCA
+(17 rows)
+
+-- a new postponed-quals issue (bug #17768)
+explain (costs off)
+select * from int4_tbl t1,
+ lateral (select * from int4_tbl t2 inner join int4_tbl t3 on t1.f1 = 1
+ inner join (int4_tbl t4 left join int4_tbl t5 on true) on true) ss;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: 1
+ -> Seq Scan on int4_tbl t3
+ -> Materialize
+ -> Nested Loop
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: 1
+ -> Seq Scan on int4_tbl t2
+ -> Materialize
+ -> Nested Loop Left Join
+ -> Nested Loop
+ -> Seq Scan on int4_tbl t1
+ Filter: (f1 = 1)
+ -> Materialize
+ -> Redistribute Motion 3:3 (slice4; segments: 3)
+ Hash Key: 1
+ -> Seq Scan on int4_tbl t4
+ -> Materialize
+ -> Redistribute Motion 3:3 (slice5; segments: 3)
+ Hash Key: 1
+ -> Seq Scan on int4_tbl t5
+ Optimizer: GPORCA
+(24 rows)
-- check dummy rels with lateral references (bug #15694)
explain (verbose, costs off)
@@ -6311,6 +7968,7 @@ select * from int8_tbl i8 left join lateral
Output: i8.q1, i8.q2, f1, (i8.q2)
-> Nested Loop Left Join
Output: i8.q1, i8.q2, f1, (i8.q2)
+ Join Filter: false
-> Seq Scan on public.int8_tbl i8
Output: i8.q1, i8.q2
-> Result
@@ -6425,22 +8083,26 @@ select f1,g from int4_tbl a, (select f1 as g) ss;
ERROR: column "f1" does not exist
LINE 1: select f1,g from int4_tbl a, (select f1 as g) ss;
^
-HINT: There is a column named "f1" in table "a", but it cannot be referenced from this part of the query.
+DETAIL: There is a column named "f1" in table "a", but it cannot be referenced from this part of the query.
+HINT: To reference that column, you must mark this subquery with LATERAL.
select f1,g from int4_tbl a, (select a.f1 as g) ss;
ERROR: invalid reference to FROM-clause entry for table "a"
LINE 1: select f1,g from int4_tbl a, (select a.f1 as g) ss;
^
-HINT: There is an entry for table "a", but it cannot be referenced from this part of the query.
+DETAIL: There is an entry for table "a", but it cannot be referenced from this part of the query.
+HINT: To reference that table, you must mark this subquery with LATERAL.
select f1,g from int4_tbl a cross join (select f1 as g) ss;
ERROR: column "f1" does not exist
LINE 1: select f1,g from int4_tbl a cross join (select f1 as g) ss;
^
-HINT: There is a column named "f1" in table "a", but it cannot be referenced from this part of the query.
+DETAIL: There is a column named "f1" in table "a", but it cannot be referenced from this part of the query.
+HINT: To reference that column, you must mark this subquery with LATERAL.
select f1,g from int4_tbl a cross join (select a.f1 as g) ss;
ERROR: invalid reference to FROM-clause entry for table "a"
LINE 1: select f1,g from int4_tbl a cross join (select a.f1 as g) ss...
^
-HINT: There is an entry for table "a", but it cannot be referenced from this part of the query.
+DETAIL: There is an entry for table "a", but it cannot be referenced from this part of the query.
+HINT: To reference that table, you must mark this subquery with LATERAL.
-- SQL:2008 says the left table is in scope but illegal to access here
select f1,g from int4_tbl a right join lateral generate_series(0, a.f1) g on true;
ERROR: invalid reference to FROM-clause entry for table "a"
@@ -6471,12 +8133,12 @@ update xx1 set x2 = f1 from (select * from int4_tbl where f1 = x1) ss;
ERROR: column "x1" does not exist
LINE 1: ... set x2 = f1 from (select * from int4_tbl where f1 = x1) ss;
^
-HINT: There is a column named "x1" in table "xx1", but it cannot be referenced from this part of the query.
+DETAIL: There is a column named "x1" in table "xx1", but it cannot be referenced from this part of the query.
update xx1 set x2 = f1 from (select * from int4_tbl where f1 = xx1.x1) ss;
ERROR: invalid reference to FROM-clause entry for table "xx1"
LINE 1: ...t x2 = f1 from (select * from int4_tbl where f1 = xx1.x1) ss...
^
-HINT: There is an entry for table "xx1", but it cannot be referenced from this part of the query.
+DETAIL: There is an entry for table "xx1", but it cannot be referenced from this part of the query.
-- can't do it even with LATERAL:
update xx1 set x2 = f1 from lateral (select * from int4_tbl where f1 = x1) ss;
ERROR: invalid reference to FROM-clause entry for table "xx1"
@@ -6491,12 +8153,12 @@ delete from xx1 using (select * from int4_tbl where f1 = x1) ss;
ERROR: column "x1" does not exist
LINE 1: ...te from xx1 using (select * from int4_tbl where f1 = x1) ss;
^
-HINT: There is a column named "x1" in table "xx1", but it cannot be referenced from this part of the query.
+DETAIL: There is a column named "x1" in table "xx1", but it cannot be referenced from this part of the query.
delete from xx1 using (select * from int4_tbl where f1 = xx1.x1) ss;
ERROR: invalid reference to FROM-clause entry for table "xx1"
LINE 1: ...from xx1 using (select * from int4_tbl where f1 = xx1.x1) ss...
^
-HINT: There is an entry for table "xx1", but it cannot be referenced from this part of the query.
+DETAIL: There is an entry for table "xx1", but it cannot be referenced from this part of the query.
delete from xx1 using lateral (select * from int4_tbl where f1 = x1) ss;
ERROR: invalid reference to FROM-clause entry for table "xx1"
LINE 1: ...xx1 using lateral (select * from int4_tbl where f1 = x1) ss;
@@ -6998,6 +8660,32 @@ left join j2 on j1.id1 = j2.id1 where j1.id2 = 1;
Settings: enable_nestloop=on
(16 rows)
+create unique index j1_id2_idx on j1(id2) where id2 is not null;
+DETAIL: Distribution key column "id1" is not included in the constraint.
+ERROR: UNIQUE index must contain all columns in the table's distribution key
+-- ensure we don't use a partial unique index as unique proofs
+explain (verbose, costs off)
+select * from j1
+inner join j2 on j1.id2 = j2.id2;
+ QUERY PLAN
+--------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Output: j1.id1, j1.id2, j2.id1, j2.id2
+ -> Nested Loop
+ Output: j1.id1, j1.id2, j2.id1, j2.id2
+ Join Filter: true
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ Output: j2.id1, j2.id2
+ -> Seq Scan on public.j2
+ Output: j2.id1, j2.id2
+ -> Index Scan using j1_pkey on public.j1
+ Output: j1.id1, j1.id2
+ Index Cond: (j1.id2 = j2.id2)
+ Optimizer: GPORCA
+(14 rows)
+
+drop index j1_id2_idx;
+ERROR: index "j1_id2_idx" does not exist
-- validate logic in merge joins which skips mark and restore.
-- it should only do this if all quals which were used to detect the unique
-- are present as join quals, and not plain quals.
@@ -7178,6 +8866,36 @@ where exists (select 1 from j3
(24 rows)
drop table j3;
+-- Test that we do not account for nullingrels when looking up statistics
+CREATE TABLE group_tbl (a INT, b INT);
+INSERT INTO group_tbl SELECT 1, 1;
+CREATE STATISTICS group_tbl_stat (ndistinct) ON a, b FROM group_tbl;
+ANALYZE group_tbl;
+EXPLAIN (COSTS OFF)
+SELECT 1 FROM group_tbl t1
+ LEFT JOIN (SELECT a c1, COALESCE(a) c2 FROM group_tbl t2) s ON TRUE
+GROUP BY s.c1, s.c2;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------
+ Result
+ -> Gather Motion 3:1 (slice1; segments: 3)
+ -> GroupAggregate
+ Group Key: t2.a, (COALESCE(t2.a))
+ -> Sort
+ Sort Key: t2.a, (COALESCE(t2.a))
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: t2.a, (COALESCE(t2.a))
+ -> Nested Loop Left Join
+ Join Filter: true
+ -> Seq Scan on group_tbl t1
+ -> Materialize
+ -> Result
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on group_tbl t2
+ Optimizer: GPORCA
+(16 rows)
+
+DROP TABLE group_tbl;
reset enable_hashjoin;
reset enable_nestloop;
reset enable_seqscan;
diff --git a/src/test/regress/expected/jsonb_optimizer.out b/src/test/regress/expected/jsonb_optimizer.out
index 6250207107a..18095582265 100644
--- a/src/test/regress/expected/jsonb_optimizer.out
+++ b/src/test/regress/expected/jsonb_optimizer.out
@@ -1,3 +1,10 @@
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+CREATE TABLE testjsonb (
+ j jsonb
+);
+\set filename :abs_srcdir '/data/jsonb.data'
+COPY testjsonb FROM :'filename';
-- Strings.
SELECT '""'::jsonb; -- OK.
jsonb
@@ -303,6 +310,31 @@ LINE 1: SELECT '{
DETAIL: Expected JSON value, but found "}".
CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":}
-- ERROR missing value for last field
+-- test non-error-throwing input
+select pg_input_is_valid('{"a":true}', 'jsonb');
+ pg_input_is_valid
+-------------------
+ t
+(1 row)
+
+select pg_input_is_valid('{"a":true', 'jsonb');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+select * from pg_input_error_info('{"a":true', 'jsonb');
+ message | detail | hint | sql_error_code
+------------------------------------+--------------------------------------+------+----------------
+ invalid input syntax for type json | The input string ended unexpectedly. | | 22P02
+(1 row)
+
+select * from pg_input_error_info('{"a":1e1000000}', 'jsonb');
+ message | detail | hint | sql_error_code
+--------------------------------+--------+------+----------------
+ value overflows numeric format | | | 22003
+(1 row)
+
-- make sure jsonb is passed through json generators without being escaped
SELECT array_to_json(ARRAY [jsonb '{"a":1}', jsonb '{"b":[2,3]}']);
array_to_json
@@ -1560,6 +1592,13 @@ SELECT jsonb_object_agg(name, type) FROM foo;
INSERT INTO foo VALUES (999999, NULL, 'bar');
SELECT jsonb_object_agg(name, type) FROM foo;
ERROR: field name must not be null
+-- edge case for parser
+SELECT jsonb_object_agg(DISTINCT 'a', 'abc');
+ jsonb_object_agg
+------------------
+ {"a": "abc"}
+(1 row)
+
-- jsonb_object
-- empty object, one dimension
SELECT jsonb_object('{}');
@@ -3015,17 +3054,16 @@ SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled'];
EXPLAIN (COSTS OFF)
SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
- QUERY PLAN
------------------------------------------------------------------------------
- Finalize Aggregate
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
- -> Partial Aggregate
- -> Bitmap Heap Scan on testjsonb
- Recheck Cond: (j @@ '($."wait" == null)'::jsonpath)
- -> Bitmap Index Scan on jidx
- Index Cond: (j @@ '($."wait" == null)'::jsonpath)
+ -> Bitmap Heap Scan on testjsonb
+ Recheck Cond: (j @@ '($."wait" == null)'::jsonpath)
+ -> Bitmap Index Scan on jidx
+ Index Cond: (j @@ '($."wait" == null)'::jsonpath)
Optimizer: Pivotal Optimizer (GPORCA)
-(8 rows)
+(7 rows)
SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
count
@@ -3131,17 +3169,16 @@ SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled
EXPLAIN (COSTS OFF)
SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
- QUERY PLAN
--------------------------------------------------------------------------------
- Finalize Aggregate
+ QUERY PLAN
+------------------------------------------------------------------------
+ Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
- -> Partial Aggregate
- -> Bitmap Heap Scan on testjsonb
- Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
- -> Bitmap Index Scan on jidx
- Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
+ -> Bitmap Heap Scan on testjsonb
+ Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
+ -> Bitmap Index Scan on jidx
+ Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
Optimizer: Pivotal Optimizer (GPORCA)
-(8 rows)
+(7 rows)
SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
count
@@ -3440,17 +3477,16 @@ SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
EXPLAIN (COSTS OFF)
SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
- QUERY PLAN
--------------------------------------------------------------------------------
- Finalize Aggregate
+ QUERY PLAN
+------------------------------------------------------------------------
+ Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
- -> Partial Aggregate
- -> Bitmap Heap Scan on testjsonb
- Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
- -> Bitmap Index Scan on jidx
- Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
+ -> Bitmap Heap Scan on testjsonb
+ Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
+ -> Bitmap Index Scan on jidx
+ Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
Optimizer: Pivotal Optimizer (GPORCA)
-(8 rows)
+(7 rows)
SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
count
@@ -5209,6 +5245,30 @@ DETAIL: The path assumes key is a composite object, but it is a scalar value.
update test_jsonb_subscript set test_json[0][0] = '1';
ERROR: cannot replace existing key
DETAIL: The path assumes key is a composite object, but it is a scalar value.
+-- try some things with short-header and toasted subscript values
+drop table test_jsonb_subscript;
+create temp table test_jsonb_subscript (
+ id text,
+ test_json jsonb
+);
+insert into test_jsonb_subscript values('foo', '{"foo": "bar"}');
+insert into test_jsonb_subscript
+ select s, ('{"' || s || '": "bar"}')::jsonb from repeat('xyzzy', 500) s;
+select length(id), test_json[id] from test_jsonb_subscript;
+ length | test_json
+--------+-----------
+ 3 | "bar"
+ 2500 | "bar"
+(2 rows)
+
+update test_jsonb_subscript set test_json[id] = '"baz"';
+select length(id), test_json[id] from test_jsonb_subscript;
+ length | test_json
+--------+-----------
+ 3 | "baz"
+ 2500 | "baz"
+(2 rows)
+
-- jsonb to tsvector
select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb);
to_tsvector
diff --git a/src/test/regress/expected/limit_optimizer.out b/src/test/regress/expected/limit_optimizer.out
index 6508365fc81..3dbcf28dccc 100644
--- a/src/test/regress/expected/limit_optimizer.out
+++ b/src/test/regress/expected/limit_optimizer.out
@@ -478,10 +478,10 @@ CREATE VIEW limit_thousand_v_1 AS SELECT thousand FROM onek WHERE thousand < 995
----------+---------+-----------+----------+---------+---------+-------------
thousand | integer | | | | plain |
View definition:
- SELECT onek.thousand
+ SELECT thousand
FROM onek
- WHERE onek.thousand < 995
- ORDER BY onek.thousand
+ WHERE thousand < 995
+ ORDER BY thousand
OFFSET 10
FETCH FIRST 5 ROWS WITH TIES;
@@ -493,10 +493,10 @@ CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995
----------+---------+-----------+----------+---------+---------+-------------
thousand | integer | | | | plain |
View definition:
- SELECT onek.thousand
+ SELECT thousand
FROM onek
- WHERE onek.thousand < 995
- ORDER BY onek.thousand
+ WHERE thousand < 995
+ ORDER BY thousand
OFFSET 10
LIMIT 5;
@@ -511,10 +511,10 @@ CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
----------+---------+-----------+----------+---------+---------+-------------
thousand | integer | | | | plain |
View definition:
- SELECT onek.thousand
+ SELECT thousand
FROM onek
- WHERE onek.thousand < 995
- ORDER BY onek.thousand
+ WHERE thousand < 995
+ ORDER BY thousand
FETCH FIRST (NULL::integer + 1) ROWS WITH TIES;
CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
@@ -525,10 +525,10 @@ CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
----------+---------+-----------+----------+---------+---------+-------------
thousand | integer | | | | plain |
View definition:
- SELECT onek.thousand
+ SELECT thousand
FROM onek
- WHERE onek.thousand < 995
- ORDER BY onek.thousand
+ WHERE thousand < 995
+ ORDER BY thousand
LIMIT ALL;
-- leave these views
diff --git a/src/test/regress/expected/matview_optimizer.out b/src/test/regress/expected/matview_optimizer.out
index d5c3103e1ad..c68b30d4d02 100644
--- a/src/test/regress/expected/matview_optimizer.out
+++ b/src/test/regress/expected/matview_optimizer.out
@@ -125,10 +125,10 @@ CREATE INDEX mvtest_aa ON mvtest_bb (grandtot);
type | text | | | | extended | |
totamt | numeric | | | | main | |
View definition:
- SELECT mvtest_tv.type,
- mvtest_tv.totamt
+ SELECT type,
+ totamt
FROM mvtest_tv
- ORDER BY mvtest_tv.type;
+ ORDER BY type;
Distributed randomly
\d+ mvtest_tvm
@@ -138,10 +138,10 @@ Distributed randomly
type | text | | | | extended | |
totamt | numeric | | | | main | |
View definition:
- SELECT mvtest_tv.type,
- mvtest_tv.totamt
+ SELECT type,
+ totamt
FROM mvtest_tv
- ORDER BY mvtest_tv.type;
+ ORDER BY type;
Distributed randomly
\d+ mvtest_tvvm
@@ -150,7 +150,7 @@ Distributed randomly
----------+---------+-----------+----------+---------+---------+--------------+-------------
grandtot | numeric | | | | main | |
View definition:
- SELECT mvtest_tvv.grandtot
+ SELECT grandtot
FROM mvtest_tvv;
Distributed randomly
@@ -162,7 +162,7 @@ Distributed randomly
Indexes:
"mvtest_aa" btree (grandtot)
View definition:
- SELECT mvtest_tvvmv.grandtot
+ SELECT grandtot
FROM mvtest_tvvmv;
Distributed randomly
@@ -178,7 +178,7 @@ ALTER MATERIALIZED VIEW mvtest_tvm SET SCHEMA mvtest_mvschema;
Indexes:
"mvtest_tvmm_pred" UNIQUE, btree (grandtot) WHERE grandtot < 0::numeric
View definition:
- SELECT sum(mvtest_tvm.totamt) AS grandtot
+ SELECT sum(totamt) AS grandtot
FROM mvtest_mvschema.mvtest_tvm;
Distributed by: (grandtot)
@@ -190,10 +190,10 @@ SET search_path = mvtest_mvschema, public;
type | text | | | | extended | |
totamt | numeric | | | | main | |
View definition:
- SELECT mvtest_tv.type,
- mvtest_tv.totamt
+ SELECT type,
+ totamt
FROM mvtest_tv
- ORDER BY mvtest_tv.type;
+ ORDER BY type;
Distributed randomly
-- modify the underlying table data
@@ -613,10 +613,10 @@ SET ROLE regress_user_mvtest;
-- duplicate all the aliases used in those queries
CREATE TABLE mvtest_foo_data AS SELECT i,
i+1 AS tid,
- md5(random()::text) AS mv,
- md5(random()::text) AS newdata,
- md5(random()::text) AS newdata2,
- md5(random()::text) AS diff
+ fipshash(random()::text) AS mv,
+ fipshash(random()::text) AS newdata,
+ fipshash(random()::text) AS newdata2,
+ fipshash(random()::text) AS diff
FROM generate_series(1, 10) i;
CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data distributed by(i);
CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data distributed by(i);
@@ -629,6 +629,26 @@ REFRESH MATERIALIZED VIEW mvtest_mv_foo;
REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_foo;
DROP OWNED BY regress_user_mvtest CASCADE;
DROP ROLE regress_user_mvtest;
+-- Concurrent refresh requires a unique index on the materialized
+-- view. Test what happens if it's dropped during the refresh.
+CREATE OR REPLACE FUNCTION mvtest_drop_the_index()
+ RETURNS bool AS $$
+BEGIN
+ EXECUTE 'DROP INDEX IF EXISTS mvtest_drop_idx';
+ RETURN true;
+END;
+$$ LANGUAGE plpgsql;
+CREATE MATERIALIZED VIEW drop_idx_matview AS
+ SELECT 1 as i WHERE mvtest_drop_the_index();
+ERROR: function cannot execute on a QE slice because it issues a non-SELECT statement
+CONTEXT: SQL statement "DROP INDEX IF EXISTS mvtest_drop_idx"
+PL/pgSQL function mvtest_drop_the_index() line 3 at EXECUTE
+CREATE UNIQUE INDEX mvtest_drop_idx ON drop_idx_matview (i);
+ERROR: relation "drop_idx_matview" does not exist
+REFRESH MATERIALIZED VIEW CONCURRENTLY drop_idx_matview;
+ERROR: relation "drop_idx_matview" does not exist
+DROP MATERIALIZED VIEW drop_idx_matview; -- clean up
+ERROR: materialized view "drop_idx_matview" does not exist
-- make sure that create WITH NO DATA works via SPI
BEGIN;
CREATE FUNCTION mvtest_func()
diff --git a/src/test/regress/expected/memoize_optimizer.out b/src/test/regress/expected/memoize_optimizer.out
index 1829cfe61bc..d3123d0d485 100644
--- a/src/test/regress/expected/memoize_optimizer.out
+++ b/src/test/regress/expected/memoize_optimizer.out
@@ -70,40 +70,73 @@ WHERE t2.unique1 < 1000;
-- Try with LATERAL joins
SELECT explain_memoize('
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
+LATERAL (SELECT t2.unique1 FROM tenk1 t2
+ WHERE t1.twenty = t2.unique1 OFFSET 0) t2
WHERE t1.unique1 < 1000;', false);
- explain_memoize
--------------------------------------------------------------------------------------------------------
- Finalize Aggregate (actual rows=1 loops=N)
- -> Gather Motion 3:1 (slice1; segments: 3) (actual rows=3 loops=N)
- -> Partial Aggregate (actual rows=1 loops=N)
- -> Nested Loop (actual rows=400 loops=N)
- -> Redistribute Motion 3:3 (slice2; segments: 3) (actual rows=400 loops=N)
- Hash Key: t1.twenty
- -> Seq Scan on tenk1 t1 (actual rows=340 loops=N)
- Filter: (unique1 < 1000)
- Rows Removed by Filter: 2906
- -> Memoize (actual rows=1 loops=N)
- Cache Key: t1.twenty
- Cache Mode: logical
- -> Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N)
- Index Cond: (unique1 = t1.twenty)
- Heap Fetches: N
+ explain_memoize
+----------------------------------------------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=N)
+ -> Nested Loop (actual rows=1000 loops=N)
+ -> Gather Motion 3:1 (slice1; segments: 3) (actual rows=1000 loops=N)
+ -> Seq Scan on tenk1 t1 (actual rows=340 loops=N)
+ Filter: (unique1 < 1000)
+ Rows Removed by Filter: 2906
+ -> Memoize (actual rows=1 loops=N)
+ Cache Key: t1.twenty
+ Cache Mode: binary
+ Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB
+ -> Result (actual rows=1 loops=N)
+ Filter: (t1.twenty = t2.unique1)
+ -> Materialize (actual rows=10000 loops=N)
+ -> Gather Motion 3:1 (slice2; segments: 3) (actual rows=10000 loops=N)
+ -> Seq Scan on tenk1 t2 (actual rows=3386 loops=N)
+ -> Materialize (actual rows=1 loops=N)
Optimizer: Postgres query optimizer
-(16 rows)
+(17 rows)
-- And check we get the expected results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
+LATERAL (SELECT t2.unique1 FROM tenk1 t2
+ WHERE t1.twenty = t2.unique1 OFFSET 0) t2
WHERE t1.unique1 < 1000;
count | avg
-------+--------------------
1000 | 9.5000000000000000
(1 row)
--- Reduce work_mem so that we see some cache evictions
-SET work_mem TO '64kB';
SET enable_mergejoin TO off;
+-- Test for varlena datatype with expr evaluation
+CREATE TABLE expr_key (x numeric, t text);
+INSERT INTO expr_key (x, t)
+SELECT d1::numeric, d1::text FROM (
+ SELECT round((d / pi())::numeric, 7) AS d1 FROM generate_series(1, 20) AS d
+) t;
+-- duplicate rows so we get some cache hits
+INSERT INTO expr_key SELECT * FROM expr_key;
+CREATE INDEX expr_key_idx_x_t ON expr_key (x, t);
+VACUUM ANALYZE expr_key;
+-- Ensure we get we get a cache miss and hit for each of the 20 distinct values
+SELECT explain_memoize('
+SELECT * FROM expr_key t1 INNER JOIN expr_key t2
+ON t1.x = t2.t::numeric AND t1.t::numeric = t2.x;', false);
+ explain_memoize
+--------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3) (actual rows=80 loops=N)
+ -> Nested Loop (actual rows=28 loops=N)
+ Join Filter: true
+ -> Redistribute Motion 3:3 (slice2; segments: 3) (actual rows=14 loops=N)
+ Hash Key: (t1.t)::numeric
+ -> Seq Scan on expr_key t1 (actual rows=14 loops=N)
+ -> Index Scan using expr_key_idx_x_t on expr_key t2 (actual rows=2 loops=N)
+ Index Cond: (x = (t1.t)::numeric)
+ Filter: (t1.x = (t)::numeric)
+ Optimizer: GPORCA
+(10 rows)
+
+DROP TABLE expr_key;
+-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
+SET work_mem TO '64kB';
+SET hash_mem_multiplier TO 1.0;
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
@@ -172,7 +205,7 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'n' as
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE INDEX strtest_n_idx ON strtest (n);
CREATE INDEX strtest_t_idx ON strtest (t);
-INSERT INTO strtest VALUES('one','one'),('two','two'),('three',repeat(md5('three'),100));
+INSERT INTO strtest VALUES('one','one'),('two','two'),('three',repeat(fipshash('three'),100));
-- duplicate rows so we get some cache hits
INSERT INTO strtest SELECT * FROM strtest;
ANALYZE strtest;
@@ -207,6 +240,51 @@ SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.t >= s2.t;', false);
(8 rows)
DROP TABLE strtest;
+-- Ensure memoize works with partitionwise join
+SET enable_partitionwise_join TO on;
+CREATE TABLE prt (a int) PARTITION BY RANGE(a);
+CREATE TABLE prt_p1 PARTITION OF prt FOR VALUES FROM (0) TO (10);
+CREATE TABLE prt_p2 PARTITION OF prt FOR VALUES FROM (10) TO (20);
+INSERT INTO prt VALUES (0), (0), (0), (0);
+INSERT INTO prt VALUES (10), (10), (10), (10);
+CREATE INDEX iprt_p1_a ON prt_p1 (a);
+CREATE INDEX iprt_p2_a ON prt_p2 (a);
+ANALYZE prt;
+SELECT explain_memoize('
+SELECT * FROM prt t1 INNER JOIN prt t2 ON t1.a = t2.a;', false);
+ explain_memoize
+-----------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3) (actual rows=32 loops=N)
+ -> Nested Loop (actual rows=16 loops=N)
+ Join Filter: (t1.a = t2.a)
+ -> Dynamic Seq Scan on prt t1 (actual rows=4 loops=N)
+ Number of partitions to scan: 2 (out of 2)
+ Partitions scanned: Avg 2.0 x 3 workers. Max 2 parts (seg0).
+ -> Dynamic Seq Scan on prt t2 (actual rows=3 loops=N)
+ Number of partitions to scan: 2 (out of 2)
+ Partitions scanned: Avg 1.4 x 3 workers of 5 scans. Max 2 parts (seg2).
+(10 rows)
+
+-- Ensure memoize works with parameterized union-all Append path
+SET enable_partitionwise_join TO off;
+SELECT explain_memoize('
+SELECT * FROM prt_p1 t1 INNER JOIN
+(SELECT * FROM prt_p1 UNION ALL SELECT * FROM prt_p2) t2
+ON t1.a = t2.a;', false);
+ explain_memoize
+-----------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3) (actual rows=16 loops=N)
+ -> Nested Loop (actual rows=16 loops=N)
+ Join Filter: true
+ -> Index Scan using iprt_p1_a on prt_p1 t1 (actual rows=4 loops=N)
+ -> Append (actual rows=4 loops=N)
+ -> Seq Scan on prt_p1 (actual rows=4 loops=N)
+ -> Seq Scan on prt_p2 (actual rows=4 loops=N)
+ Index Cond: (a = prt_p1.a)
+(9 rows)
+
+DROP TABLE prt;
+RESET enable_partitionwise_join;
-- Exercise Memoize code that flushes the cache when a parameter changes which
-- is not part of the cache key.
-- Ensure we get a Memoize plan
@@ -263,6 +341,7 @@ RESET enable_bitmapscan;
RESET enable_hashjoin;
RESET optimizer_enable_hashjoin;
RESET optimizer_enable_bitmapscan;
+RESET hash_mem_multiplier;
-- Test parallel plans with Memoize
SET min_parallel_table_scan_size TO 0;
SET parallel_setup_cost TO 0;
@@ -284,12 +363,10 @@ WHERE t1.unique1 < 1000;
-> Hash
-> Redistribute Motion 3:3 (slice2; segments: 3)
Hash Key: t1.twenty
- -> Bitmap Heap Scan on tenk1 t1
- Recheck Cond: (unique1 < 1000)
- -> Bitmap Index Scan on tenk1_unique1
- Index Cond: (unique1 < 1000)
+ -> Seq Scan on tenk1 t1
+ Filter: (unique1 < 1000)
Optimizer: Postgres query optimizer
-(14 rows)
+(12 rows)
-- And ensure the parallel plan gives us the correct results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
diff --git a/src/test/regress/expected/misc_functions_optimizer.out b/src/test/regress/expected/misc_functions_optimizer.out
index f5187fad45b..b46c7055fa3 100644
--- a/src/test/regress/expected/misc_functions_optimizer.out
+++ b/src/test/regress/expected/misc_functions_optimizer.out
@@ -1,3 +1,7 @@
+-- directory paths and dlsuffix are passed to us in environment variables
+\getenv libdir PG_LIBDIR
+\getenv dlsuffix PG_DLSUFFIX
+\set regresslib :libdir '/regress' :dlsuffix
--
-- num_nulls()
--
@@ -135,6 +139,145 @@ ERROR: function num_nulls() does not exist
LINE 1: SELECT num_nulls();
^
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+--
+-- canonicalize_path()
+--
+CREATE FUNCTION test_canonicalize_path(text)
+ RETURNS text
+ AS :'regresslib'
+ LANGUAGE C STRICT IMMUTABLE;
+SELECT test_canonicalize_path('/');
+ test_canonicalize_path
+------------------------
+ /
+(1 row)
+
+SELECT test_canonicalize_path('/./abc/def/');
+ test_canonicalize_path
+------------------------
+ /abc/def
+(1 row)
+
+SELECT test_canonicalize_path('/./../abc/def');
+ test_canonicalize_path
+------------------------
+ /abc/def
+(1 row)
+
+SELECT test_canonicalize_path('/./../../abc/def/');
+ test_canonicalize_path
+------------------------
+ /abc/def
+(1 row)
+
+SELECT test_canonicalize_path('/abc/.././def/ghi');
+ test_canonicalize_path
+------------------------
+ /def/ghi
+(1 row)
+
+SELECT test_canonicalize_path('/abc/./../def/ghi//');
+ test_canonicalize_path
+------------------------
+ /def/ghi
+(1 row)
+
+SELECT test_canonicalize_path('/abc/def/../..');
+ test_canonicalize_path
+------------------------
+ /
+(1 row)
+
+SELECT test_canonicalize_path('/abc/def/../../..');
+ test_canonicalize_path
+------------------------
+ /
+(1 row)
+
+SELECT test_canonicalize_path('/abc/def/../../../../ghi/jkl');
+ test_canonicalize_path
+------------------------
+ /ghi/jkl
+(1 row)
+
+SELECT test_canonicalize_path('.');
+ test_canonicalize_path
+------------------------
+ .
+(1 row)
+
+SELECT test_canonicalize_path('./');
+ test_canonicalize_path
+------------------------
+ .
+(1 row)
+
+SELECT test_canonicalize_path('./abc/..');
+ test_canonicalize_path
+------------------------
+ .
+(1 row)
+
+SELECT test_canonicalize_path('abc/../');
+ test_canonicalize_path
+------------------------
+ .
+(1 row)
+
+SELECT test_canonicalize_path('abc/../def');
+ test_canonicalize_path
+------------------------
+ def
+(1 row)
+
+SELECT test_canonicalize_path('..');
+ test_canonicalize_path
+------------------------
+ ..
+(1 row)
+
+SELECT test_canonicalize_path('../abc/def');
+ test_canonicalize_path
+------------------------
+ ../abc/def
+(1 row)
+
+SELECT test_canonicalize_path('../abc/..');
+ test_canonicalize_path
+------------------------
+ ..
+(1 row)
+
+SELECT test_canonicalize_path('../abc/../def');
+ test_canonicalize_path
+------------------------
+ ../def
+(1 row)
+
+SELECT test_canonicalize_path('../abc/../../def/ghi');
+ test_canonicalize_path
+------------------------
+ ../../def/ghi
+(1 row)
+
+SELECT test_canonicalize_path('./abc/./def/.');
+ test_canonicalize_path
+------------------------
+ abc/def
+(1 row)
+
+SELECT test_canonicalize_path('./abc/././def/.');
+ test_canonicalize_path
+------------------------
+ abc/def
+(1 row)
+
+SELECT test_canonicalize_path('./abc/./def/.././ghi/../../../jkl/mno');
+ test_canonicalize_path
+------------------------
+ ../jkl/mno
+(1 row)
+
--
-- pg_log_backend_memory_contexts()
--
@@ -149,6 +292,13 @@ SELECT pg_log_backend_memory_contexts(pg_backend_pid());
t
(1 row)
+SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity
+ WHERE backend_type = 'checkpointer';
+ pg_log_backend_memory_contexts
+--------------------------------
+ t
+(1 row)
+
CREATE ROLE regress_log_memory;
NOTICE: resource queue required -- using default resource queue "pg_default"
SELECT has_function_privilege('regress_log_memory',
@@ -225,18 +375,105 @@ select count(*) >= 0 as ok from pg_ls_archive_statusdir();
t
(1 row)
+-- pg_read_file()
+select length(pg_read_file('postmaster.pid')) > 20;
+ ?column?
+----------
+ t
+(1 row)
+
+select length(pg_read_file('postmaster.pid', 1, 20));
+ length
+--------
+ 20
+(1 row)
+
+-- Test missing_ok
+select pg_read_file('does not exist'); -- error
+ERROR: could not open file "does not exist" for reading: No such file or directory
+select pg_read_file('does not exist', true) IS NULL; -- ok
+ ?column?
+----------
+ t
+(1 row)
+
+-- Test invalid argument
+select pg_read_file('does not exist', 0, -1); -- error
+ERROR: requested length cannot be negative
+select pg_read_file('does not exist', 0, -1, true); -- error
+ERROR: requested length cannot be negative
+-- pg_read_binary_file()
+select length(pg_read_binary_file('postmaster.pid')) > 20;
+ ?column?
+----------
+ t
+(1 row)
+
+select length(pg_read_binary_file('postmaster.pid', 1, 20));
+ length
+--------
+ 20
+(1 row)
+
+-- Test missing_ok
+select pg_read_binary_file('does not exist'); -- error
+ERROR: could not open file "does not exist" for reading: No such file or directory
+select pg_read_binary_file('does not exist', true) IS NULL; -- ok
+ ?column?
+----------
+ t
+(1 row)
+
+-- Test invalid argument
+select pg_read_binary_file('does not exist', 0, -1); -- error
+ERROR: requested length cannot be negative
+select pg_read_binary_file('does not exist', 0, -1, true); -- error
+ERROR: requested length cannot be negative
+-- pg_stat_file()
+select size > 20, isdir from pg_stat_file('postmaster.pid');
+ ?column? | isdir
+----------+-------
+ t | f
+(1 row)
+
+-- pg_ls_dir()
select * from (select pg_ls_dir('.') a) a where a = 'base' limit 1;
a
------
base
(1 row)
+-- Test missing_ok (second argument)
+select pg_ls_dir('does not exist', false, false); -- error
+ERROR: could not open directory "does not exist": No such file or directory
+select pg_ls_dir('does not exist', true, false); -- ok
+ pg_ls_dir
+-----------
+(0 rows)
+
+-- Test include_dot_dirs (third argument)
+select count(*) = 1 as dot_found
+ from pg_ls_dir('.', false, true) as ls where ls = '.';
+ dot_found
+-----------
+ t
+(1 row)
+
+select count(*) = 1 as dot_found
+ from pg_ls_dir('.', false, false) as ls where ls = '.';
+ dot_found
+-----------
+ f
+(1 row)
+
+-- pg_timezone_names()
select * from (select (pg_timezone_names()).name) ptn where name='UTC' limit 1;
name
------
UTC
(1 row)
+-- pg_tablespace_databases()
select count(*) > 0 from
(select pg_tablespace_databases(oid) as pts from pg_tablespace
where spcname = 'pg_default') pts
@@ -246,6 +483,56 @@ select count(*) > 0 from
t
(1 row)
+--
+-- Test replication slot directory functions
+--
+CREATE ROLE regress_slot_dir_funcs;
+-- Not available by default.
+SELECT has_function_privilege('regress_slot_dir_funcs',
+ 'pg_ls_logicalsnapdir()', 'EXECUTE');
+ has_function_privilege
+------------------------
+ f
+(1 row)
+
+SELECT has_function_privilege('regress_slot_dir_funcs',
+ 'pg_ls_logicalmapdir()', 'EXECUTE');
+ has_function_privilege
+------------------------
+ f
+(1 row)
+
+SELECT has_function_privilege('regress_slot_dir_funcs',
+ 'pg_ls_replslotdir(text)', 'EXECUTE');
+ has_function_privilege
+------------------------
+ f
+(1 row)
+
+GRANT pg_monitor TO regress_slot_dir_funcs;
+-- Role is now part of pg_monitor, so these are available.
+SELECT has_function_privilege('regress_slot_dir_funcs',
+ 'pg_ls_logicalsnapdir()', 'EXECUTE');
+ has_function_privilege
+------------------------
+ t
+(1 row)
+
+SELECT has_function_privilege('regress_slot_dir_funcs',
+ 'pg_ls_logicalmapdir()', 'EXECUTE');
+ has_function_privilege
+------------------------
+ t
+(1 row)
+
+SELECT has_function_privilege('regress_slot_dir_funcs',
+ 'pg_ls_replslotdir(text)', 'EXECUTE');
+ has_function_privilege
+------------------------
+ t
+(1 row)
+
+DROP ROLE regress_slot_dir_funcs;
--
-- Test adding a support function to a subject function
--
@@ -269,6 +556,10 @@ WHERE my_int_eq(a.unique2, 42);
(8 rows)
-- With support function that knows it's int4eq, we get a different plan
+CREATE FUNCTION test_support_func(internal)
+ RETURNS internal
+ AS :'regresslib', 'test_support_func'
+ LANGUAGE C STRICT;
ALTER FUNCTION my_int_eq(int, int) SUPPORT test_support_func;
EXPLAIN (COSTS OFF)
SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1
@@ -305,7 +596,7 @@ SELECT * FROM tenk1 a JOIN my_gen_series(1,1000) g ON a.unique1 = g;
EXPLAIN (COSTS OFF)
SELECT * FROM tenk1 a JOIN my_gen_series(1,10) g ON a.unique1 = g;
- QUERY PLAN
+ QUERY PLAN
------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
-> Hash Join
@@ -316,3 +607,51 @@ SELECT * FROM tenk1 a JOIN my_gen_series(1,10) g ON a.unique1 = g;
Optimizer: Pivotal Optimizer (GPORCA)
(7 rows)
+-- Test functions for control data
+SELECT count(*) > 0 AS ok FROM pg_control_checkpoint();
+ ok
+----
+ t
+(1 row)
+
+SELECT count(*) > 0 AS ok FROM pg_control_init();
+ ok
+----
+ t
+(1 row)
+
+SELECT count(*) > 0 AS ok FROM pg_control_recovery();
+ ok
+----
+ t
+(1 row)
+
+SELECT count(*) > 0 AS ok FROM pg_control_system();
+ ok
+----
+ t
+(1 row)
+
+-- pg_split_walfile_name
+SELECT * FROM pg_split_walfile_name(NULL);
+ segment_number | timeline_id
+----------------+-------------
+ |
+(1 row)
+
+SELECT * FROM pg_split_walfile_name('invalid');
+ERROR: invalid WAL file name "invalid"
+SELECT segment_number > 0 AS ok_segment_number, timeline_id
+ FROM pg_split_walfile_name('000000010000000100000000');
+ ok_segment_number | timeline_id
+-------------------+-------------
+ t | 1
+(1 row)
+
+SELECT segment_number > 0 AS ok_segment_number, timeline_id
+ FROM pg_split_walfile_name('ffffffFF00000001000000af');
+ ok_segment_number | timeline_id
+-------------------+-------------
+ t | 4294967295
+(1 row)
+
diff --git a/src/test/regress/expected/notin_optimizer.out b/src/test/regress/expected/notin_optimizer.out
index 20df710612e..8d68984a563 100644
--- a/src/test/regress/expected/notin_optimizer.out
+++ b/src/test/regress/expected/notin_optimizer.out
@@ -1459,15 +1459,15 @@ select * from t1_12930 where (a, b) not in (select a, b from t2_12930);
(0 rows)
explain select * from t1_12930 where (a, b) not in (select a, b from t2_12930) and b is not null;
- QUERY PLAN
-------------------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3) (cost=751.50..206932.71 rows=16 width=8)
- -> Hash Left Anti Semi (Not-In) Join (cost=751.50..206932.49 rows=5 width=8)
- Hash Cond: ((t1_12930.a = t2_12930.a) AND (t1_12930.b = t2_12930.b))
- -> Seq Scan on t1_12930 (cost=0.00..321.00 rows=28671 width=8)
- Filter: (b IS NOT NULL)
- -> Hash (cost=321.00..321.00 rows=28700 width=8)
- -> Seq Scan on t2_12930 (cost=0.00..321.00 rows=28700 width=8)
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3) (cost=751.07..208007.32 rows=64510 width=8)
+ -> Hash Right Anti Join (cost=751.07..207147.18 rows=21503 width=8)
+ Hash Cond: ((t2_12930.a = t1_12930.a) AND (t2_12930.b = t1_12930.b))
+ -> Seq Scan on t2_12930 (cost=0.00..321.00 rows=28700 width=8)
+ -> Hash (cost=321.00..321.00 rows=28671 width=8)
+ -> Seq Scan on t1_12930 (cost=0.00..321.00 rows=28671 width=8)
+ Filter: (b IS NOT NULL)
Optimizer: Postgres query optimizer
(8 rows)
diff --git a/src/test/regress/expected/olap_window_seq_optimizer.out b/src/test/regress/expected/olap_window_seq_optimizer.out
index 51679fd24e8..0150b3f27f3 100644
--- a/src/test/regress/expected/olap_window_seq_optimizer.out
+++ b/src/test/regress/expected/olap_window_seq_optimizer.out
@@ -8965,7 +8965,7 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
--------+--------+-----------+----------+---------+---------+-------------
sum | bigint | | | | plain |
View definition:
- SELECT sum(DISTINCT g.g / 2) OVER (PARTITION BY (g.g / 4)) AS sum
+ SELECT sum(DISTINCT g / 2) OVER (PARTITION BY (g / 4)) AS sum
FROM generate_series(1, 5) g(g);
-- These are tests for pushing down filter predicates in window functions.
diff --git a/src/test/regress/expected/part_external_table_optimizer.out b/src/test/regress/expected/part_external_table_optimizer.out
index 81b69086c51..775df7af8ff 100644
--- a/src/test/regress/expected/part_external_table_optimizer.out
+++ b/src/test/regress/expected/part_external_table_optimizer.out
@@ -27,11 +27,15 @@
set optimizer_trace_fallback=on;
create schema part_external_table;
set search_path=part_external_table;
+\getenv hostname PG_HOSTNAME
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set part1_file 'file://' :hostname :abs_srcdir '/data/part1.csv'
+\set part2_file 'file://' :hostname :abs_srcdir '/data/part2.csv'
create table part (a int, b int) partition by range (b);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-create external table p1_e (a int, b int) location ('file://@hostname@@abs_srcdir@/data/part1.csv') format 'csv';
-create external table p2_e (a int, b int) location ('file://@hostname@@abs_srcdir@/data/part2.csv') format 'csv';
+create external table p1_e (a int, b int) location (:'part1_file') format 'csv';
+create external table p2_e (a int, b int) location (:'part2_file') format 'csv';
alter table part attach partition p1_e for values from (0) to (10);
NOTICE: partition constraints are not validated when attaching a readable external table
alter table part attach partition p2_e for values from (10) to (19);
@@ -416,8 +420,9 @@ explain select * from part where b > 22;
alter table part add partition exch1 start(60) end (70);
alter table part add partition exch2 start(70) end (80);
-- exchange with external tables
-create external web table p3_e (a int, b int) execute 'cat > @abs_srcdir@/data/part-ext.csv' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
-create writable external web table p4_e (a int, b int) execute 'cat > @abs_srcdir@/data/part-ext.csv' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+\set part_ext_file 'cat > ' :abs_srcdir '/data/part-ext.csv'
+create external web table p3_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+create writable external web table p4_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
-- allow exchange readable external table
alter table part exchange partition exch1 with table p3_e;
NOTICE: partition constraints are not validated when attaching a readable external table
@@ -434,8 +439,8 @@ OPTIONS ( filename '/does/not/exist.csv', format 'csv');
-- exchange works, but no error checking like for external tables
alter table part exchange partition exch2 with table ft3;
-- same tests for attach partition
-create external web table p5_e (a int, b int) execute 'cat > @abs_srcdir@/data/part-ext.csv' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
-create writable external web table p6_e (a int, b int) execute 'cat > @abs_srcdir@/data/part-ext.csv' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+create external web table p5_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+create writable external web table p6_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
-- allow attach readable external table
alter table part attach partition p5_e for values from (80) to (90);
NOTICE: partition constraints are not validated when attaching a readable external table
diff --git a/src/test/regress/expected/partition_locking_optimizer.out b/src/test/regress/expected/partition_locking_optimizer.out
index 6fb7d0f06b7..beb28113c4e 100644
--- a/src/test/regress/expected/partition_locking_optimizer.out
+++ b/src/test/regress/expected/partition_locking_optimizer.out
@@ -91,6 +91,15 @@ select * from locktest_master where coalesce not like 'gp_%' and coalesce not li
toast index | AccessExclusiveLock | relation | master
toast index | AccessExclusiveLock | relation | master
toast index | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
toast table | ShareLock | relation | master
toast table | ShareLock | relation | master
toast table | ShareLock | relation | master
@@ -100,7 +109,7 @@ select * from locktest_master where coalesce not like 'gp_%' and coalesce not li
toast table | ShareLock | relation | master
toast table | ShareLock | relation | master
toast table | ShareLock | relation | master
-(28 rows)
+(37 rows)
select * from locktest_segments where coalesce not like 'gp_%' and coalesce not like 'pg_%';
coalesce | mode | locktype | node
@@ -133,7 +142,16 @@ select * from locktest_segments where coalesce not like 'gp_%' and coalesce not
toast index | AccessExclusiveLock | relation | n segments
toast table | ShareLock | relation | n segments
toast index | AccessExclusiveLock | relation | n segments
-(28 rows)
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+(37 rows)
commit;
-- select
@@ -246,6 +264,12 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
select * from locktest_master where coalesce not like 'gp_%' and coalesce not like 'pg_%';
coalesce | mode | locktype | node
-------------------+---------------------+----------+--------
+ aoseg table | AccessExclusiveLock | relation | master
+ aoseg table | AccessExclusiveLock | relation | master
+ aoseg table | AccessExclusiveLock | relation | master
+ aovisimap table | AccessExclusiveLock | relation | master
+ aovisimap table | AccessExclusiveLock | relation | master
+ aovisimap table | AccessExclusiveLock | relation | master
partlockt | AccessExclusiveLock | relation | master
partlockt_1_prt_1 | AccessExclusiveLock | relation | master
partlockt_1_prt_2 | AccessExclusiveLock | relation | master
@@ -253,14 +277,23 @@ select * from locktest_master where coalesce not like 'gp_%' and coalesce not li
toast index | AccessExclusiveLock | relation | master
toast index | AccessExclusiveLock | relation | master
toast index | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
+ toast table | AccessExclusiveLock | relation | master
toast table | ShareLock | relation | master
toast table | ShareLock | relation | master
toast table | ShareLock | relation | master
-(10 rows)
+(19 rows)
select * from locktest_segments where coalesce not like 'gp_%' and coalesce not like 'pg_%';
coalesce | mode | locktype | node
-------------------+---------------------+----------+------------
+ aoseg table | AccessExclusiveLock | relation | n segments
+ aoseg table | AccessExclusiveLock | relation | n segments
+ aoseg table | AccessExclusiveLock | relation | n segments
+ aovisimap table | AccessExclusiveLock | relation | n segments
+ aovisimap table | AccessExclusiveLock | relation | n segments
+ aovisimap table | AccessExclusiveLock | relation | n segments
partlockt | AccessExclusiveLock | relation | n segments
partlockt_1_prt_1 | AccessExclusiveLock | relation | n segments
partlockt_1_prt_2 | AccessExclusiveLock | relation | n segments
@@ -271,7 +304,10 @@ select * from locktest_segments where coalesce not like 'gp_%' and coalesce not
toast index | AccessExclusiveLock | relation | n segments
toast table | ShareLock | relation | n segments
toast index | AccessExclusiveLock | relation | n segments
-(10 rows)
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+ toast table | AccessExclusiveLock | relation | n segments
+(19 rows)
commit;
begin;
@@ -363,54 +399,56 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
begin;
create index partlockt_idx on partlockt(i);
select * from locktest_master where coalesce not like 'gp_%' and coalesce not like 'pg_%';
- coalesce | mode | locktype | node
--------------------------+---------------------+----------+--------
- partlockt | ShareLock | relation | master
- partlockt_1_prt_1 | ShareLock | relation | master
- partlockt_1_prt_1_i_idx | AccessExclusiveLock | relation | master
- partlockt_1_prt_2 | ShareLock | relation | master
- partlockt_1_prt_2_i_idx | AccessExclusiveLock | relation | master
- partlockt_1_prt_3 | ShareLock | relation | master
- partlockt_1_prt_3_i_idx | AccessExclusiveLock | relation | master
- partlockt_1_prt_4 | ShareLock | relation | master
- partlockt_1_prt_4_i_idx | AccessExclusiveLock | relation | master
- partlockt_1_prt_5 | ShareLock | relation | master
- partlockt_1_prt_5_i_idx | AccessExclusiveLock | relation | master
- partlockt_1_prt_6 | ShareLock | relation | master
- partlockt_1_prt_6_i_idx | AccessExclusiveLock | relation | master
- partlockt_1_prt_7 | ShareLock | relation | master
- partlockt_1_prt_7_i_idx | AccessExclusiveLock | relation | master
- partlockt_1_prt_8 | ShareLock | relation | master
- partlockt_1_prt_8_i_idx | AccessExclusiveLock | relation | master
- partlockt_1_prt_9 | ShareLock | relation | master
- partlockt_1_prt_9_i_idx | AccessExclusiveLock | relation | master
- partlockt_idx | AccessExclusiveLock | relation | master
-(20 rows)
+ coalesce | mode | locktype | node
+-------------------------+--------------------------+----------+--------
+ partlockt | ShareLock | relation | master
+ partlockt_1_prt_1 | ShareLock | relation | master
+ partlockt_1_prt_1_i_idx | AccessExclusiveLock | relation | master
+ partlockt_1_prt_2 | ShareLock | relation | master
+ partlockt_1_prt_2_i_idx | AccessExclusiveLock | relation | master
+ partlockt_1_prt_3 | ShareLock | relation | master
+ partlockt_1_prt_3_i_idx | AccessExclusiveLock | relation | master
+ partlockt_1_prt_4 | ShareLock | relation | master
+ partlockt_1_prt_4_i_idx | AccessExclusiveLock | relation | master
+ partlockt_1_prt_5 | ShareLock | relation | master
+ partlockt_1_prt_5_i_idx | AccessExclusiveLock | relation | master
+ partlockt_1_prt_6 | ShareLock | relation | master
+ partlockt_1_prt_6_i_idx | AccessExclusiveLock | relation | master
+ partlockt_1_prt_7 | ShareLock | relation | master
+ partlockt_1_prt_7_i_idx | AccessExclusiveLock | relation | master
+ partlockt_1_prt_8 | ShareLock | relation | master
+ partlockt_1_prt_8_i_idx | AccessExclusiveLock | relation | master
+ partlockt_1_prt_9 | ShareLock | relation | master
+ partlockt_1_prt_9_i_idx | AccessExclusiveLock | relation | master
+ partlockt_idx | AccessExclusiveLock | relation | master
+ partlockt_idx | ShareUpdateExclusiveLock | relation | master
+(21 rows)
select * from locktest_segments where coalesce not like 'gp_%' and coalesce not like 'pg_%';
- coalesce | mode | locktype | node
--------------------------+---------------------+----------+------------
- partlockt | ShareLock | relation | n segments
- partlockt_1_prt_1 | ShareLock | relation | n segments
- partlockt_1_prt_1_i_idx | AccessExclusiveLock | relation | n segments
- partlockt_1_prt_2 | ShareLock | relation | n segments
- partlockt_1_prt_2_i_idx | AccessExclusiveLock | relation | n segments
- partlockt_1_prt_3 | ShareLock | relation | n segments
- partlockt_1_prt_3_i_idx | AccessExclusiveLock | relation | n segments
- partlockt_1_prt_4 | ShareLock | relation | n segments
- partlockt_1_prt_4_i_idx | AccessExclusiveLock | relation | n segments
- partlockt_1_prt_5 | ShareLock | relation | n segments
- partlockt_1_prt_5_i_idx | AccessExclusiveLock | relation | n segments
- partlockt_1_prt_6 | ShareLock | relation | n segments
- partlockt_1_prt_6_i_idx | AccessExclusiveLock | relation | n segments
- partlockt_1_prt_7 | ShareLock | relation | n segments
- partlockt_1_prt_7_i_idx | AccessExclusiveLock | relation | n segments
- partlockt_1_prt_8 | ShareLock | relation | n segments
- partlockt_1_prt_8_i_idx | AccessExclusiveLock | relation | n segments
- partlockt_1_prt_9 | ShareLock | relation | n segments
- partlockt_1_prt_9_i_idx | AccessExclusiveLock | relation | n segments
- partlockt_idx | AccessExclusiveLock | relation | n segments
-(20 rows)
+ coalesce | mode | locktype | node
+-------------------------+--------------------------+----------+------------
+ partlockt | ShareLock | relation | n segments
+ partlockt_1_prt_1 | ShareLock | relation | n segments
+ partlockt_1_prt_1_i_idx | AccessExclusiveLock | relation | n segments
+ partlockt_1_prt_2 | ShareLock | relation | n segments
+ partlockt_1_prt_2_i_idx | AccessExclusiveLock | relation | n segments
+ partlockt_1_prt_3 | ShareLock | relation | n segments
+ partlockt_1_prt_3_i_idx | AccessExclusiveLock | relation | n segments
+ partlockt_1_prt_4 | ShareLock | relation | n segments
+ partlockt_1_prt_4_i_idx | AccessExclusiveLock | relation | n segments
+ partlockt_1_prt_5 | ShareLock | relation | n segments
+ partlockt_1_prt_5_i_idx | AccessExclusiveLock | relation | n segments
+ partlockt_1_prt_6 | ShareLock | relation | n segments
+ partlockt_1_prt_6_i_idx | AccessExclusiveLock | relation | n segments
+ partlockt_1_prt_7 | ShareLock | relation | n segments
+ partlockt_1_prt_7_i_idx | AccessExclusiveLock | relation | n segments
+ partlockt_1_prt_8 | ShareLock | relation | n segments
+ partlockt_1_prt_8_i_idx | AccessExclusiveLock | relation | n segments
+ partlockt_1_prt_9 | ShareLock | relation | n segments
+ partlockt_1_prt_9_i_idx | AccessExclusiveLock | relation | n segments
+ partlockt_idx | AccessExclusiveLock | relation | n segments
+ partlockt_idx | ShareUpdateExclusiveLock | relation | n segments
+(21 rows)
commit;
-- Force use of the index in the select and delete below. We're not interested
@@ -483,7 +521,8 @@ select * from locktest_master where coalesce not like 'gp_%' and coalesce not li
partlockt | ExclusiveLock | relation | master
partlockt_1_prt_4 | ExclusiveLock | relation | master
partlockt_1_prt_4_i_idx | ExclusiveLock | relation | master
-(3 rows)
+ partlockt_idx | ExclusiveLock | relation | master
+(4 rows)
select * from locktest_segments where coalesce not like 'gp_%' and coalesce not like 'pg_%';
coalesce | mode | locktype | node
diff --git a/src/test/regress/expected/partition_prune_optimizer.out b/src/test/regress/expected/partition_prune_optimizer.out
index 72e87807c8a..68fca340923 100644
--- a/src/test/regress/expected/partition_prune_optimizer.out
+++ b/src/test/regress/expected/partition_prune_optimizer.out
@@ -1175,6 +1175,7 @@ create table boolpart (a bool) partition by list (a);
create table boolpart_default partition of boolpart default;
create table boolpart_t partition of boolpart for values in ('true');
create table boolpart_f partition of boolpart for values in ('false');
+insert into boolpart values (true), (false), (null);
explain (costs off) select * from boolpart where a in (true, false);
QUERY PLAN
------------------------------------------------
@@ -1255,6 +1256,300 @@ explain (costs off) select * from boolpart where a is not unknown;
Optimizer: Pivotal Optimizer (GPORCA)
(5 rows)
+select * from boolpart where a in (true, false);
+ a
+---
+ f
+ t
+(2 rows)
+
+select * from boolpart where a = false;
+ a
+---
+ f
+(1 row)
+
+select * from boolpart where not a = false;
+ a
+---
+ t
+(1 row)
+
+select * from boolpart where a is true or a is not true;
+ a
+---
+
+ f
+ t
+(3 rows)
+
+select * from boolpart where a is not true;
+ a
+---
+
+ f
+(2 rows)
+
+select * from boolpart where a is not true and a is not false;
+ a
+---
+
+(1 row)
+
+select * from boolpart where a is unknown;
+ a
+---
+
+(1 row)
+
+select * from boolpart where a is not unknown;
+ a
+---
+ f
+ t
+(2 rows)
+
+-- try some other permutations with a NULL partition instead of a DEFAULT
+delete from boolpart where a is null;
+create table boolpart_null partition of boolpart for values in (null);
+insert into boolpart values(null);
+explain (costs off) select * from boolpart where a is not true;
+ QUERY PLAN
+----------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Dynamic Seq Scan on boolpart
+ Number of partitions to scan: 2 (out of 4)
+ Filter: (a IS NOT TRUE)
+ Optimizer: GPORCA
+(5 rows)
+
+explain (costs off) select * from boolpart where a is not true and a is not false;
+ QUERY PLAN
+--------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1)
+ -> Dynamic Seq Scan on boolpart
+ Number of partitions to scan: 1 (out of 4)
+ Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE))
+ Optimizer: GPORCA
+(5 rows)
+
+explain (costs off) select * from boolpart where a is not false;
+ QUERY PLAN
+----------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Dynamic Seq Scan on boolpart
+ Number of partitions to scan: 2 (out of 4)
+ Filter: (a IS NOT FALSE)
+ Optimizer: GPORCA
+(5 rows)
+
+select * from boolpart where a is not true;
+ a
+---
+
+ f
+(2 rows)
+
+select * from boolpart where a is not true and a is not false;
+ a
+---
+
+(1 row)
+
+select * from boolpart where a is not false;
+ a
+---
+
+ t
+(2 rows)
+
+-- inverse boolean partitioning - a seemingly unlikely design, but we've got
+-- code for it, so we'd better test it.
+create table iboolpart (a bool) partition by list ((not a));
+create table iboolpart_default partition of iboolpart default;
+create table iboolpart_f partition of iboolpart for values in ('true');
+create table iboolpart_t partition of iboolpart for values in ('false');
+insert into iboolpart values (true), (false), (null);
+explain (costs off) select * from iboolpart where a in (true, false);
+ QUERY PLAN
+-------------------------------------------------------
+ Gather Motion 1:1 (slice1; segments: 1)
+ -> Append
+ -> Seq Scan on iboolpart_t iboolpart_1
+ Filter: (a = ANY ('{t,f}'::boolean[]))
+ -> Seq Scan on iboolpart_f iboolpart_2
+ Filter: (a = ANY ('{t,f}'::boolean[]))
+ -> Seq Scan on iboolpart_default iboolpart_3
+ Filter: (a = ANY ('{t,f}'::boolean[]))
+ Optimizer: Postgres query optimizer
+(9 rows)
+
+explain (costs off) select * from iboolpart where a = false;
+ QUERY PLAN
+------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on iboolpart_f iboolpart
+ Filter: (NOT a)
+ Optimizer: Postgres query optimizer
+(4 rows)
+
+explain (costs off) select * from iboolpart where not a = false;
+ QUERY PLAN
+------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on iboolpart_t iboolpart
+ Filter: a
+ Optimizer: Postgres query optimizer
+(4 rows)
+
+explain (costs off) select * from iboolpart where a is true or a is not true;
+ QUERY PLAN
+--------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Append
+ -> Seq Scan on iboolpart_t iboolpart_1
+ Filter: ((a IS TRUE) OR (a IS NOT TRUE))
+ -> Seq Scan on iboolpart_f iboolpart_2
+ Filter: ((a IS TRUE) OR (a IS NOT TRUE))
+ -> Seq Scan on iboolpart_default iboolpart_3
+ Filter: ((a IS TRUE) OR (a IS NOT TRUE))
+ Optimizer: Postgres query optimizer
+(9 rows)
+
+explain (costs off) select * from iboolpart where a is not true;
+ QUERY PLAN
+-------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Append
+ -> Seq Scan on iboolpart_t iboolpart_1
+ Filter: (a IS NOT TRUE)
+ -> Seq Scan on iboolpart_f iboolpart_2
+ Filter: (a IS NOT TRUE)
+ -> Seq Scan on iboolpart_default iboolpart_3
+ Filter: (a IS NOT TRUE)
+ Optimizer: Postgres query optimizer
+(9 rows)
+
+explain (costs off) select * from iboolpart where a is not true and a is not false;
+ QUERY PLAN
+--------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Append
+ -> Seq Scan on iboolpart_t iboolpart_1
+ Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE))
+ -> Seq Scan on iboolpart_f iboolpart_2
+ Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE))
+ -> Seq Scan on iboolpart_default iboolpart_3
+ Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE))
+ Optimizer: Postgres query optimizer
+(9 rows)
+
+explain (costs off) select * from iboolpart where a is unknown;
+ QUERY PLAN
+-------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Append
+ -> Seq Scan on iboolpart_t iboolpart_1
+ Filter: (a IS UNKNOWN)
+ -> Seq Scan on iboolpart_f iboolpart_2
+ Filter: (a IS UNKNOWN)
+ -> Seq Scan on iboolpart_default iboolpart_3
+ Filter: (a IS UNKNOWN)
+ Optimizer: Postgres query optimizer
+(9 rows)
+
+explain (costs off) select * from iboolpart where a is not unknown;
+ QUERY PLAN
+-------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Append
+ -> Seq Scan on iboolpart_t iboolpart_1
+ Filter: (a IS NOT UNKNOWN)
+ -> Seq Scan on iboolpart_f iboolpart_2
+ Filter: (a IS NOT UNKNOWN)
+ -> Seq Scan on iboolpart_default iboolpart_3
+ Filter: (a IS NOT UNKNOWN)
+ Optimizer: Postgres query optimizer
+(9 rows)
+
+select * from iboolpart where a in (true, false);
+ a
+---
+ f
+ t
+(2 rows)
+
+select * from iboolpart where a = false;
+ a
+---
+ f
+(1 row)
+
+select * from iboolpart where not a = false;
+ a
+---
+ t
+(1 row)
+
+select * from iboolpart where a is true or a is not true;
+ a
+---
+
+ f
+ t
+(3 rows)
+
+select * from iboolpart where a is not true;
+ a
+---
+
+ f
+(2 rows)
+
+select * from iboolpart where a is not true and a is not false;
+ a
+---
+
+(1 row)
+
+select * from iboolpart where a is unknown;
+ a
+---
+
+(1 row)
+
+select * from iboolpart where a is not unknown;
+ a
+---
+ f
+ t
+(2 rows)
+
+-- Try some other permutations with a NULL partition instead of a DEFAULT
+delete from iboolpart where a is null;
+create table iboolpart_null partition of iboolpart for values in (null);
+insert into iboolpart values(null);
+-- Pruning shouldn't take place for these. Just check the result is correct
+select * from iboolpart where a is not true;
+ a
+---
+
+ f
+(2 rows)
+
+select * from iboolpart where a is not true and a is not false;
+ a
+---
+
+(1 row)
+
+select * from iboolpart where a is not false;
+ a
+---
+
+ t
+(2 rows)
+
create table boolrangep (a bool, b bool, c int) partition by range (a,b,c);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
@@ -1265,6 +1560,7 @@ NOTICE: table has parent, setting distribution columns to match parent table
create table boolrangep_ff1 partition of boolrangep for values from ('false', 'false', 0) to ('false', 'false', 50);
NOTICE: table has parent, setting distribution columns to match parent table
create table boolrangep_ff2 partition of boolrangep for values from ('false', 'false', 50) to ('false', 'false', 100);
+create table boolrangep_null partition of boolrangep default;
NOTICE: table has parent, setting distribution columns to match parent table
-- try a more complex case that's been known to trip up pruning in the past
explain (costs off) select * from boolrangep where not a and not b and c = 25;
@@ -1276,6 +1572,32 @@ explain (costs off) select * from boolrangep where not a and not b and c = 25;
Optimizer: Postgres query optimizer
(4 rows)
+-- ensure we prune boolrangep_tf
+explain (costs off) select * from boolrangep where a is not true and not b and c = 25;
+                            QUERY PLAN
+------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Append
+ -> Seq Scan on boolrangep_ff1 boolrangep_1
+ Filter: ((a IS NOT TRUE) AND (NOT b) AND (c = 25))
+ -> Seq Scan on boolrangep_ff2 boolrangep_2
+ Filter: ((a IS NOT TRUE) AND (NOT b) AND (c = 25))
+ -> Seq Scan on boolrangep_ft boolrangep_3
+ Filter: ((a IS NOT TRUE) AND (NOT b) AND (c = 25))
+ -> Seq Scan on boolrangep_null boolrangep_4
+ Filter: ((a IS NOT TRUE) AND (NOT b) AND (c = 25))
+
+-- ensure we prune everything apart from boolrangep_tf and boolrangep_null
+explain (costs off) select * from boolrangep where a is not false and not b and c = 25;
+                            QUERY PLAN
+------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Append
+ -> Seq Scan on boolrangep_tf boolrangep_1
+ Filter: ((a IS NOT FALSE) AND (NOT b) AND (c = 25))
+ -> Seq Scan on boolrangep_null boolrangep_2
+ Filter: ((a IS NOT FALSE) AND (NOT b) AND (c = 25))
+
-- test scalar-to-array operators
create table coercepart (a varchar) partition by list (a);
create table coercepart_ab partition of coercepart for values in ('ab');
@@ -1686,7 +2008,7 @@ explain (costs off) select * from like_op_noprune where a like '%BC';
create table lparted_by_int2 (a smallint) partition by list (a);
create table lparted_by_int2_1 partition of lparted_by_int2 for values in (1);
create table lparted_by_int2_16384 partition of lparted_by_int2 for values in (16384);
-explain (costs off) select * from lparted_by_int2 where a = 100000000000000;
+explain (costs off) select * from lparted_by_int2 where a = 100_000_000_000_000;
QUERY PLAN
---------------------------------------
Result
@@ -1698,7 +2020,7 @@ create table rparted_by_int2 (a smallint) partition by range (a);
create table rparted_by_int2_1 partition of rparted_by_int2 for values from (1) to (10);
create table rparted_by_int2_16384 partition of rparted_by_int2 for values from (10) to (16384);
-- all partitions pruned
-explain (costs off) select * from rparted_by_int2 where a > 100000000000000;
+explain (costs off) select * from rparted_by_int2 where a > 100_000_000_000_000;
QUERY PLAN
-------------------------------------
Result
@@ -1708,7 +2030,7 @@ explain (costs off) select * from rparted_by_int2 where a > 100000000000000;
create table rparted_by_int2_maxvalue partition of rparted_by_int2 for values from (16384) to (maxvalue);
-- all partitions but rparted_by_int2_maxvalue pruned
-explain (costs off) select * from rparted_by_int2 where a > 100000000000000;
+explain (costs off) select * from rparted_by_int2 where a > 100_000_000_000_000;
QUERY PLAN
-------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
@@ -1718,7 +2040,39 @@ explain (costs off) select * from rparted_by_int2 where a > 100000000000000;
Optimizer: Pivotal Optimizer (GPORCA)
(5 rows)
-drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, boolrangep, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2;
+drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, iboolpart, boolrangep, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2;
+-- check that AlternativeSubPlan within a pruning expression gets cleaned up
+create table asptab (id int primary key) partition by range (id);
+create table asptab0 partition of asptab for values from (0) to (1);
+create table asptab1 partition of asptab for values from (1) to (2);
+explain (costs off)
+select * from
+ (select exists (select 1 from int4_tbl tinner where f1 = touter.f1) as b
+ from int4_tbl touter) ss,
+ asptab
+where asptab.id > ss.b::int;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ Join Filter: true
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> GroupAggregate
+ Group Key: touter.f1, touter.ctid, touter.gp_segment_id, (true)
+ -> Sort
+ Sort Key: touter.f1, touter.ctid, touter.gp_segment_id, (true)
+ -> Hash Left Join
+ Hash Cond: (touter.f1 = tinner.f1)
+ -> Seq Scan on int4_tbl touter
+ -> Hash
+ -> Seq Scan on int4_tbl tinner
+ -> Dynamic Index Scan on asptab_pkey on asptab
+ Index Cond: (id > ((CASE WHEN (NOT ((true) IS NULL)) THEN true ELSE false END))::integer)
+ Number of partitions to scan: 2 (out of 2)
+ Optimizer: GPORCA
+(17 rows)
+
+drop table asptab;
--
-- Test Partition pruning for HASH partitioning
--
@@ -1961,9 +2315,9 @@ explain (costs off) select * from hp where a = 1 and b = 'abcde' and
--------------------------
Result
One-Time Filter: false
-(2 rows)
+ Optimizer: Postgres query optimizer
+(3 rows)
-drop table hp;
--
-- Test runtime partition pruning
--
@@ -2097,6 +2451,27 @@ explain (analyze, costs off, summary off, timing off) execute ab_q3 (2, 2);
Optimizer: Postgres query optimizer
(12 rows)
+--
+-- Test runtime pruning with hash partitioned tables
+--
+-- recreate partitions dropped above
+create table hp1 partition of hp for values with (modulus 4, remainder 1);
+create table hp2 partition of hp for values with (modulus 4, remainder 2);
+create table hp3 partition of hp for values with (modulus 4, remainder 3);
+-- Ensure we correctly prune unneeded partitions when there is an IS NULL qual
+prepare hp_q1 (text) as
+select * from hp where a is null and b = $1;
+explain (costs off) execute hp_q1('xxx');
+QUERY PLAN
+___________
+ Gather Motion XXX
+ -> Append
+ Subplans Removed: 3
+ -> Seq Scan on hp2 hp_1
+ Filter: ((a IS NULL) AND (b = $1))
+
+deallocate hp_q1;
+drop table hp;
-- Test a backwards Append scan
create table list_part (a int) partition by list (a);
create table list_part1 partition of list_part for values in (1);
@@ -3597,7 +3972,7 @@ explain (costs off) select * from pph_arrpart where a in ('{4, 5}', '{1}');
drop table pph_arrpart;
-- enum type list partition key
create type pp_colors as enum ('green', 'blue', 'black');
-create table pp_enumpart (a pp_colors) partition by list (a);
+create table pp_enumpart (col1 int, a pp_colors) partition by list (a);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create table pp_enumpart_green partition of pp_enumpart for values in ('green');
@@ -3605,7 +3980,7 @@ create table pp_enumpart_blue partition of pp_enumpart for values in ('blue');
explain (costs off) select * from pp_enumpart where a = 'blue';
QUERY PLAN
------------------------------------------
- Gather Motion 1:1 (slice1; segments: 1)
+ Gather Motion 3:1 (slice1; segments: 3)
-> Dynamic Seq Scan on pp_enumpart
Number of partitions to scan: 1 (out of 2)
Filter: (a = 'blue'::pp_colors)
@@ -4018,7 +4393,7 @@ explain (costs off) update listp1 set a = 1 where a = 2;
-> Result
-> Redistribute Motion 3:3 (slice1; segments: 3)
Hash Key: a
- -> Split
+ -> Split Update
-> Seq Scan on listp1
Filter: (a = 2)
Optimizer: Pivotal Optimizer (GPORCA)
@@ -4044,7 +4419,7 @@ explain (costs off) update listp1 set a = 1 where a = 2;
-> Result
-> Redistribute Motion 3:3 (slice1; segments: 3)
Hash Key: a
- -> Split
+ -> Split Update
-> Seq Scan on listp1
Filter: (a = 2)
Optimizer: Pivotal Optimizer (GPORCA)
@@ -4207,22 +4582,233 @@ explain (costs off) select * from rp_prefix_test3 where a >= 1 and b >= 1 and b
Optimizer: Postgres query optimizer
(4 rows)
-create table hp_prefix_test (a int, b int, c int, d int) partition by hash (a part_test_int4_ops, b part_test_int4_ops, c part_test_int4_ops, d part_test_int4_ops);
-create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 2, remainder 0);
-create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 2, remainder 1);
--- Test that get_steps_using_prefix() handles non-NULL step_nullkeys
-explain (costs off) select * from hp_prefix_test where a = 1 and b is null and c = 1 and d = 1;
- QUERY PLAN
--------------------------------------------------------------------
- Gather Motion 1:1 (slice1; segments: 1)
- -> Seq Scan on hp_prefix_test_p1 hp_prefix_test
- Filter: ((b IS NULL) AND (a = 1) AND (c = 1) AND (d = 1))
- Optimizer: Postgres query optimizer
-(4 rows)
-
drop table rp_prefix_test1;
drop table rp_prefix_test2;
drop table rp_prefix_test3;
+--
+-- Test that get_steps_using_prefix() handles IS NULL clauses correctly
+--
+create table hp_prefix_test (a int, b int, c int, d int)
+ partition by hash (a part_test_int4_ops, b part_test_int4_ops, c part_test_int4_ops, d part_test_int4_ops);
+-- create 8 partitions
+select 'create table hp_prefix_test_p' || x::text || ' partition of hp_prefix_test for values with (modulus 8, remainder ' || x::text || ');'
+from generate_Series(0,7) x;
+ ?column?
+------------------------------------------------------------------------------------------------------
+ create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0);
+ create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1);
+ create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2);
+ create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3);
+ create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4);
+ create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5);
+ create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6);
+ create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7);
+(8 rows)
+
+\gexec
+create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0);
+create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1);
+create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2);
+create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3);
+create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4);
+create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5);
+create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6);
+create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7);
+-- insert 16 rows, one row for each test to perform.
+insert into hp_prefix_test
+select
+ case a when 0 then null else 1 end,
+ case b when 0 then null else 2 end,
+ case c when 0 then null else 3 end,
+ case d when 0 then null else 4 end
+from
+ generate_series(0,1) a,
+ generate_series(0,1) b,
+ generate_Series(0,1) c,
+ generate_Series(0,1) d;
+-- Ensure partition pruning works correctly for each combination of IS NULL
+-- and equality quals. This may seem a little excessive, but there have been
+-- a number of bugs in this area over the years. We make use of row only
+-- output to reduce the size of the expected results.
+\t on
+select
+ 'explain (costs off) select tableoid::regclass,* from hp_prefix_test where ' ||
+ string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos)
+from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s)
+group by g.s
+order by g.s;
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4
+ explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4
+
+\gexec
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p0 hp_prefix_test
+ Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d IS NULL))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p1 hp_prefix_test
+ Filter: ((b IS NULL) AND (c IS NULL) AND (d IS NULL) AND (a = 1))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p2 hp_prefix_test
+ Filter: ((a IS NULL) AND (c IS NULL) AND (d IS NULL) AND (b = 2))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p4 hp_prefix_test
+ Filter: ((c IS NULL) AND (d IS NULL) AND (a = 1) AND (b = 2))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p3 hp_prefix_test
+ Filter: ((a IS NULL) AND (b IS NULL) AND (d IS NULL) AND (c = 3))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p7 hp_prefix_test
+ Filter: ((b IS NULL) AND (d IS NULL) AND (a = 1) AND (c = 3))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p4 hp_prefix_test
+ Filter: ((a IS NULL) AND (d IS NULL) AND (b = 2) AND (c = 3))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p5 hp_prefix_test
+ Filter: ((d IS NULL) AND (a = 1) AND (b = 2) AND (c = 3))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p4 hp_prefix_test
+ Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d = 4))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p6 hp_prefix_test
+ Filter: ((b IS NULL) AND (c IS NULL) AND (a = 1) AND (d = 4))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p5 hp_prefix_test
+ Filter: ((a IS NULL) AND (c IS NULL) AND (b = 2) AND (d = 4))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p6 hp_prefix_test
+ Filter: ((c IS NULL) AND (a = 1) AND (b = 2) AND (d = 4))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p4 hp_prefix_test
+ Filter: ((a IS NULL) AND (b IS NULL) AND (c = 3) AND (d = 4))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p5 hp_prefix_test
+ Filter: ((b IS NULL) AND (a = 1) AND (c = 3) AND (d = 4))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p6 hp_prefix_test
+ Filter: ((a IS NULL) AND (b = 2) AND (c = 3) AND (d = 4))
+
+explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4
+ Gather Motion XXX
+ -> Seq Scan on hp_prefix_test_p4 hp_prefix_test
+ Filter: ((a = 1) AND (b = 2) AND (c = 3) AND (d = 4))
+
+-- And ensure we get exactly 1 row from each. Again, all 16 possible combinations.
+select
+ 'select tableoid::regclass,* from hp_prefix_test where ' ||
+ string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos)
+from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s)
+group by g.s
+order by g.s;
+ select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null
+ select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null
+ select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null
+ select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null
+ select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null
+ select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null
+ select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null
+ select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null
+ select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4
+ select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4
+ select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4
+ select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4
+ select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4
+ select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4
+ select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4
+ select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4
+
+\gexec
+select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null
+ hp_prefix_test_p0 | | | |
+
+select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null
+ hp_prefix_test_p1 | 1 | | |
+
+select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null
+ hp_prefix_test_p2 | | 2 | |
+
+select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null
+ hp_prefix_test_p4 | 1 | 2 | |
+
+select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null
+ hp_prefix_test_p3 | | | 3 |
+
+select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null
+ hp_prefix_test_p7 | 1 | | 3 |
+
+select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null
+ hp_prefix_test_p4 | | 2 | 3 |
+
+select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null
+ hp_prefix_test_p5 | 1 | 2 | 3 |
+
+select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4
+ hp_prefix_test_p4 | | | | 4
+
+select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4
+ hp_prefix_test_p6 | 1 | | | 4
+
+select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4
+ hp_prefix_test_p5 | | 2 | | 4
+
+select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4
+ hp_prefix_test_p6 | 1 | 2 | | 4
+
+select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4
+ hp_prefix_test_p4 | | | 3 | 4
+
+select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4
+ hp_prefix_test_p5 | 1 | | 3 | 4
+
+select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4
+ hp_prefix_test_p6 | | 2 | 3 | 4
+
+select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4
+ hp_prefix_test_p4 | 1 | 2 | 3 | 4
+
+\t off
drop table hp_prefix_test;
--
-- Check that gen_partprune_steps() detects self-contradiction from clauses
diff --git a/src/test/regress/expected/pg_lsn_optimizer.out b/src/test/regress/expected/pg_lsn_optimizer.out
index ab6f0b0132d..68b95270d6b 100644
--- a/src/test/regress/expected/pg_lsn_optimizer.out
+++ b/src/test/regress/expected/pg_lsn_optimizer.out
@@ -26,6 +26,19 @@ INSERT INTO PG_LSN_TBL VALUES ('/ABCD');
ERROR: invalid input syntax for type pg_lsn: "/ABCD"
LINE 1: INSERT INTO PG_LSN_TBL VALUES ('/ABCD');
^
+-- Also try it with non-error-throwing API
+SELECT pg_input_is_valid('16AE7F7', 'pg_lsn');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT * FROM pg_input_error_info('16AE7F7', 'pg_lsn');
+ message | detail | hint | sql_error_code
+-------------------------------------------------+--------+------+----------------
+ invalid input syntax for type pg_lsn: "16AE7F7" | | | 22P02
+(1 row)
+
-- Min/Max aggregation
SELECT MIN(f1), MAX(f1) FROM PG_LSN_TBL;
min | max
diff --git a/src/test/regress/expected/plpgsql_optimizer.out b/src/test/regress/expected/plpgsql_optimizer.out
index d31a38d9f42..79c5051615e 100755
--- a/src/test/regress/expected/plpgsql_optimizer.out
+++ b/src/test/regress/expected/plpgsql_optimizer.out
@@ -2473,6 +2473,32 @@ select refcursor_test2(20000, 20000) as "Should be false",
f | t
(1 row)
+-- should fail
+create function constant_refcursor() returns refcursor as $$
+declare
+ rc constant refcursor;
+begin
+ open rc for select a from rc_test;
+ return rc;
+end
+$$ language plpgsql;
+select constant_refcursor();
+ERROR: variable "rc" is declared CONSTANT
+-- but it's okay like this
+create or replace function constant_refcursor() returns refcursor as $$
+declare
+ rc constant refcursor := 'my_cursor_name';
+begin
+ open rc for select a from rc_test;
+ return rc;
+end
+$$ language plpgsql;
+select constant_refcursor();
+ constant_refcursor
+--------------------
+ my_cursor_name
+(1 row)
+
--
-- tests for cursors with named parameter arguments
--
@@ -2578,13 +2604,11 @@ begin
end $$ language plpgsql;
select namedparmcursor_test7();
ERROR: division by zero
-CONTEXT: SQL statement "SELECT 42/0 AS p1, 77 AS p2;"
+CONTEXT: SQL expression "42/0 AS p1, 77 AS p2"
PL/pgSQL function namedparmcursor_test7() line 6 at OPEN
--- check that line comments work correctly within the argument list (there
--- is some special handling of this case in the code: the newline after the
--- comment must be preserved when the argument-evaluating query is
--- constructed, otherwise the comment effectively comments out the next
--- argument, too)
+-- check that line comments work correctly within the argument list
+-- (this used to require a special hack in the code; it no longer does,
+-- but let's keep the test anyway)
create function namedparmcursor_test8() returns int4 as $$
declare
c1 cursor (p1 int, p2 int) for
@@ -3567,22 +3591,22 @@ select * from ret_query1();
create type record_type as (x text, y int, z boolean);
create or replace function ret_query2(lim int) returns setof record_type as $$
begin
- return query select md5(s.x::text), s.x, s.x > 0
+ return query select fipshash(s.x::text), s.x, s.x > 0
from generate_series(-8, lim) s (x) where s.x % 2 = 0;
end;
$$ language plpgsql;
select * from ret_query2(8);
x | y | z
----------------------------------+----+---
- a8d2ec85eaf98407310b72eb73dda247 | -8 | f
- 596a3d04481816330f07e4f97510c28f | -6 | f
- 0267aaf632e87a63288a08331f22c7c3 | -4 | f
- 5d7b9adcbe1c629ec722529dd12e5129 | -2 | f
- cfcd208495d565ef66e7dff9f98764da | 0 | f
- c81e728d9d4c2f636f067f89cc14862c | 2 | t
- a87ff679a2f3e71d9181a67b7542122c | 4 | t
- 1679091c5a880faf6fb5e6087eb1b2dc | 6 | t
- c9f0f895fb98ab9159f51fd0297e236d | 8 | t
+ 03b26944890929ff751653acb2f2af79 | -6 | f
+ 2c624232cdd221771294dfbb310aca00 | 8 | t
+ 4b227777d4dd1fc61c6f884f48641d02 | 4 | t
+ 5feceb66ffc86f38d952786c6d696c79 | 0 | f
+ cf3bae39dd692048a8bf961182e6a34d | -2 | f
+ d4735e3a265e16eee03f59718b9b5d03 | 2 | t
+ e5e0093f285a4fb94c3fcc2ad7fd04ed | -4 | f
+ e7f6c011776e8db7cd330b54174fd76f | 6 | t
+ e91592205d3881e3ea35d66973bb4898 | -8 | f
(9 rows)
-- test EXECUTE USING
@@ -3645,6 +3669,9 @@ declare
c2 cursor
for select * from generate_series(41,43) i;
begin
+ -- assign portal names to cursors to get stable output
+ c := 'c';
+ c2 := 'c2';
for r in c(5,7) loop
raise notice '% from %', r.i, c;
end loop;
@@ -3787,6 +3814,22 @@ select i, j from forc_test;
(10 rows)
drop function forc01();
+-- it's okay to re-use a cursor variable name, even when bound
+do $$
+declare cnt int := 0;
+ c1 cursor for select * from forc_test;
+begin
+ for r1 in c1 loop
+ declare c1 cursor for select * from forc_test;
+ begin
+ for r2 in c1 loop
+ cnt := cnt + 1;
+ end loop;
+ end;
+ end loop;
+ raise notice 'cnt = %', cnt;
+end $$;
+NOTICE: cnt = 100
-- fail because cursor has no query bound to it
create or replace function forc_bad() returns void as $$
declare
@@ -4827,24 +4870,48 @@ NOTICE: caught division by zero
NOTICE: caught division by zero
NOTICE: caught division by zero
-- Check variable scoping -- a var is not available in its own or prior
--- default expressions.
-create function scope_test() returns int as $$
+-- default expressions, but it is available in later ones.
+do $$
+declare x int := x + 1; -- error
+begin
+ raise notice 'x = %', x;
+end;
+$$;
+ERROR: column "x" does not exist
+LINE 1: x + 1
+ ^
+QUERY: x + 1
+do $$
+declare y int := x + 1; -- error
+ x int := 42;
+begin
+ raise notice 'x = %, y = %', x, y;
+end;
+$$;
+ERROR: column "x" does not exist
+LINE 1: x + 1
+ ^
+QUERY: x + 1
+do $$
+declare x int := 42;
+ y int := x + 1;
+begin
+ raise notice 'x = %, y = %', x, y;
+end;
+$$;
+NOTICE: x = 42, y = 43
+do $$
declare x int := 42;
begin
declare y int := x + 1;
x int := x + 2;
+ z int := x * 10;
begin
- return x * 100 + y;
+ raise notice 'x = %, y = %, z = %', x, y, z;
end;
end;
-$$ language plpgsql;
-select scope_test();
- scope_test
-------------
- 4443
-(1 row)
-
-drop function scope_test();
+$$;
+NOTICE: x = 44, y = 43, z = 440
-- Check handling of conflicts between plpgsql vars and table columns.
set plpgsql.variable_conflict = error;
create function conflict_test() returns setof int8_tbl as $$
@@ -5355,7 +5422,7 @@ NOTICE: outer_func() done
20
(1 row)
--- repeated call should to work
+-- repeated call should work
select outer_outer_func(20);
NOTICE: calling down into outer_func()
NOTICE: calling down into inner_func()
@@ -5437,7 +5504,7 @@ NOTICE: outer_func() done
20
(1 row)
--- repeated call should to work
+-- repeated call should work
select outer_outer_func(20);
NOTICE: calling down into outer_func()
NOTICE: calling down into inner_func()
@@ -5458,6 +5525,33 @@ NOTICE: outer_func() done
drop function outer_outer_func(int);
drop function outer_func(int);
drop function inner_func(int);
+-- Test pg_routine_oid
+create function current_function(text)
+returns regprocedure as $$
+declare
+ fn_oid regprocedure;
+begin
+ get diagnostics fn_oid = pg_routine_oid;
+ return fn_oid;
+end;
+$$ language plpgsql;
+select current_function('foo');
+ current_function
+------------------------
+ current_function(text)
+(1 row)
+
+drop function current_function(text);
+-- shouldn't fail in DO, even though there's no useful data
+do $$
+declare
+ fn_oid oid;
+begin
+ get diagnostics fn_oid = pg_routine_oid;
+ raise notice 'pg_routine_oid = %', fn_oid;
+end;
+$$;
+NOTICE: pg_routine_oid = 0
--
-- Test ASSERT
--
diff --git a/src/test/regress/expected/polygon_optimizer.out b/src/test/regress/expected/polygon_optimizer.out
index 9b61fc7ad40..eb3342a14b1 100644
--- a/src/test/regress/expected/polygon_optimizer.out
+++ b/src/test/regress/expected/polygon_optimizer.out
@@ -326,3 +326,28 @@ WHERE seq.id IS NULL OR idx.id IS NULL;
RESET enable_seqscan;
RESET enable_indexscan;
RESET enable_bitmapscan;
+-- test non-error-throwing API for some core types
+SELECT pg_input_is_valid('(2.0,0.8,0.1)', 'polygon');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT * FROM pg_input_error_info('(2.0,0.8,0.1)', 'polygon');
+ message | detail | hint | sql_error_code
+--------------------------------------------------------+--------+------+----------------
+ invalid input syntax for type polygon: "(2.0,0.8,0.1)" | | | 22P02
+(1 row)
+
+SELECT pg_input_is_valid('(2.0,xyz)', 'polygon');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+SELECT * FROM pg_input_error_info('(2.0,xyz)', 'polygon');
+ message | detail | hint | sql_error_code
+----------------------------------------------------+--------+------+----------------
+ invalid input syntax for type polygon: "(2.0,xyz)" | | | 22P02
+(1 row)
+
diff --git a/src/test/regress/expected/portals_optimizer.out b/src/test/regress/expected/portals_optimizer.out
index e87f720ac91..9ab9e4d63f0 100644
--- a/src/test/regress/expected/portals_optimizer.out
+++ b/src/test/regress/expected/portals_optimizer.out
@@ -468,6 +468,23 @@ FETCH 1 FROM foo24;
FETCH BACKWARD 1 FROM foo24; -- should fail
ERROR: backward scan is not supported in this version of Apache Cloudberry
END;
+BEGIN;
+DECLARE foo24 NO SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2;
+FETCH 1 FROM foo24;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx
+(1 row)
+
+FETCH ABSOLUTE 2 FROM foo24; -- allowed
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx
+(1 row)
+
+FETCH ABSOLUTE 1 FROM foo24; -- should fail
+ERROR: backward scan is not supported in this version of Apache Cloudberry
+END;
--
-- Cursors outside transaction blocks
--
@@ -1271,3 +1288,28 @@ FETCH ALL FROM foo2;
(0 rows)
CLOSE foo2;
+-- Check fetching of toasted datums via cursors.
+begin;
+-- Other compression algorithms may cause the compressed data to be stored
+-- inline. Use pglz to ensure consistent results.
+set default_toast_compression = 'pglz';
+create table toasted_data (f1 int[]);
+insert into toasted_data
+ select array_agg(i) from generate_series(12345678, 12345678 + 1000) i;
+declare local_portal cursor for select * from toasted_data;
+fetch all in local_portal;
+ f1
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {12345678,12345679,12345680,12345681,12345682,12345683,12345684,12345685,12345686,12345687,12345688,12345689,12345690,12345691,12345692,12345693,12345694,12345695,12345696,12345697,12345698,12345699,12345700,12345701,12345702,12345703,12345704,12345705,12345706,12345707,12345708,12345709,12345710,12345711,12345712,12345713,12345714,12345715,12345716,12345717,12345718,12345719,12345720,12345721,12345722,12345723,12345724,12345725,12345726,12345727,12345728,12345729,12345730,12345731,12345732,12345733,12345734,12345735,12345736,12345737,12345738,12345739,12345740,12345741,12345742,12345743,12345744,12345745,12345746,12345747,12345748,12345749,12345750,12345751,12345752,12345753,12345754,12345755,12345756,12345757,12345758,12345759,12345760,12345761,12345762,12345763,12345764,12345765,12345766,12345767,12345768,12345769,12345770,12345771,12345772,12345773,12345774,12345775,12345776,12345777,12345778,12345779,12345780,12345781,12345782,12345783,12345784,12345785,12345786,12345787,12345788,12345789,12345790,12345791,12345792,12345793,12345794,12345795,12345796,12345797,12345798,12345799,12345800,12345801,12345802,12345803,12345804,12345805,12345806,12345807,12345808,12345809,12345810,12345811,12345812,12345813,12345814,12345815,12345816,12345817,12345818,12345819,12345820,12345821,12345822,12345823,12345824,12345825,12345826,12345827,12345828,12345829,12345830,12345831,12345832,12345833,12345834,12345835,12345836,12345837,12345838,12345839,12345840,12345841,12345842,12345843,12345844,12345845,12345846,12345847,12345848,12345849,12345850,12345851,12345852,12345853,12345854,12345855,12345856,12345857,12345858,12345859,12345860,12345861,12345862,12345863,12345864,12345865,12345866,12345867,12345868,12345869,12345870,12345871,12345872,12345873,12345874,12345875,12345876,12345877,12345878,12345879,12345880,12345881,12345882,12345883,12345884,12345885,12345886,12345887,12345888,12345889,12345890,12345891,12345892,12345893,12345894,12345895,12345896,12345897,12345898,12345899
,12345900,12345901,12345902,12345903,12345904,12345905,12345906,12345907,12345908,12345909,12345910,12345911,12345912,12345913,12345914,12345915,12345916,12345917,12345918,12345919,12345920,12345921,12345922,12345923,12345924,12345925,12345926,12345927,12345928,12345929,12345930,12345931,12345932,12345933,12345934,12345935,12345936,12345937,12345938,12345939,12345940,12345941,12345942,12345943,12345944,12345945,12345946,12345947,12345948,12345949,12345950,12345951,12345952,12345953,12345954,12345955,12345956,12345957,12345958,12345959,12345960,12345961,12345962,12345963,12345964,12345965,12345966,12345967,12345968,12345969,12345970,12345971,12345972,12345973,12345974,12345975,12345976,12345977,12345978,12345979,12345980,12345981,12345982,12345983,12345984,12345985,12345986,12345987,12345988,12345989,12345990,12345991,12345992,12345993,12345994,12345995,12345996,12345997,12345998,12345999,12346000,12346001,12346002,12346003,12346004,12346005,12346006,12346007,12346008,12346009,12346010,12346011,12346012,12346013,12346014,12346015,12346016,12346017,12346018,12346019,12346020,12346021,12346022,12346023,12346024,12346025,12346026,12346027,12346028,12346029,12346030,12346031,12346032,12346033,12346034,12346035,12346036,12346037,12346038,12346039,12346040,12346041,12346042,12346043,12346044,12346045,12346046,12346047,12346048,12346049,12346050,12346051,12346052,12346053,12346054,12346055,12346056,12346057,12346058,12346059,12346060,12346061,12346062,12346063,12346064,12346065,12346066,12346067,12346068,12346069,12346070,12346071,12346072,12346073,12346074,12346075,12346076,12346077,12346078,12346079,12346080,12346081,12346082,12346083,12346084,12346085,12346086,12346087,12346088,12346089,12346090,12346091,12346092,12346093,12346094,12346095,12346096,12346097,12346098,12346099,12346100,12346101,12346102,12346103,12346104,12346105,12346106,12346107,12346108,12346109,12346110,12346111,12346112,12346113,12346114,12346115,12346116,12346117,12346118,12346119,12346120,12346121,1
2346122,12346123,12346124,12346125,12346126,12346127,12346128,12346129,12346130,12346131,12346132,12346133,12346134,12346135,12346136,12346137,12346138,12346139,12346140,12346141,12346142,12346143,12346144,12346145,12346146,12346147,12346148,12346149,12346150,12346151,12346152,12346153,12346154,12346155,12346156,12346157,12346158,12346159,12346160,12346161,12346162,12346163,12346164,12346165,12346166,12346167,12346168,12346169,12346170,12346171,12346172,12346173,12346174,12346175,12346176,12346177,12346178,12346179,12346180,12346181,12346182,12346183,12346184,12346185,12346186,12346187,12346188,12346189,12346190,12346191,12346192,12346193,12346194,12346195,12346196,12346197,12346198,12346199,12346200,12346201,12346202,12346203,12346204,12346205,12346206,12346207,12346208,12346209,12346210,12346211,12346212,12346213,12346214,12346215,12346216,12346217,12346218,12346219,12346220,12346221,12346222,12346223,12346224,12346225,12346226,12346227,12346228,12346229,12346230,12346231,12346232,12346233,12346234,12346235,12346236,12346237,12346238,12346239,12346240,12346241,12346242,12346243,12346244,12346245,12346246,12346247,12346248,12346249,12346250,12346251,12346252,12346253,12346254,12346255,12346256,12346257,12346258,12346259,12346260,12346261,12346262,12346263,12346264,12346265,12346266,12346267,12346268,12346269,12346270,12346271,12346272,12346273,12346274,12346275,12346276,12346277,12346278,12346279,12346280,12346281,12346282,12346283,12346284,12346285,12346286,12346287,12346288,12346289,12346290,12346291,12346292,12346293,12346294,12346295,12346296,12346297,12346298,12346299,12346300,12346301,12346302,12346303,12346304,12346305,12346306,12346307,12346308,12346309,12346310,12346311,12346312,12346313,12346314,12346315,12346316,12346317,12346318,12346319,12346320,12346321,12346322,12346323,12346324,12346325,12346326,12346327,12346328,12346329,12346330,12346331,12346332,12346333,12346334,12346335,12346336,12346337,12346338,12346339,12346340,12346341,12346342,12346343,123
46344,12346345,12346346,12346347,12346348,12346349,12346350,12346351,12346352,12346353,12346354,12346355,12346356,12346357,12346358,12346359,12346360,12346361,12346362,12346363,12346364,12346365,12346366,12346367,12346368,12346369,12346370,12346371,12346372,12346373,12346374,12346375,12346376,12346377,12346378,12346379,12346380,12346381,12346382,12346383,12346384,12346385,12346386,12346387,12346388,12346389,12346390,12346391,12346392,12346393,12346394,12346395,12346396,12346397,12346398,12346399,12346400,12346401,12346402,12346403,12346404,12346405,12346406,12346407,12346408,12346409,12346410,12346411,12346412,12346413,12346414,12346415,12346416,12346417,12346418,12346419,12346420,12346421,12346422,12346423,12346424,12346425,12346426,12346427,12346428,12346429,12346430,12346431,12346432,12346433,12346434,12346435,12346436,12346437,12346438,12346439,12346440,12346441,12346442,12346443,12346444,12346445,12346446,12346447,12346448,12346449,12346450,12346451,12346452,12346453,12346454,12346455,12346456,12346457,12346458,12346459,12346460,12346461,12346462,12346463,12346464,12346465,12346466,12346467,12346468,12346469,12346470,12346471,12346472,12346473,12346474,12346475,12346476,12346477,12346478,12346479,12346480,12346481,12346482,12346483,12346484,12346485,12346486,12346487,12346488,12346489,12346490,12346491,12346492,12346493,12346494,12346495,12346496,12346497,12346498,12346499,12346500,12346501,12346502,12346503,12346504,12346505,12346506,12346507,12346508,12346509,12346510,12346511,12346512,12346513,12346514,12346515,12346516,12346517,12346518,12346519,12346520,12346521,12346522,12346523,12346524,12346525,12346526,12346527,12346528,12346529,12346530,12346531,12346532,12346533,12346534,12346535,12346536,12346537,12346538,12346539,12346540,12346541,12346542,12346543,12346544,12346545,12346546,12346547,12346548,12346549,12346550,12346551,12346552,12346553,12346554,12346555,12346556,12346557,12346558,12346559,12346560,12346561,12346562,12346563,12346564,12346565,12346
566,12346567,12346568,12346569,12346570,12346571,12346572,12346573,12346574,12346575,12346576,12346577,12346578,12346579,12346580,12346581,12346582,12346583,12346584,12346585,12346586,12346587,12346588,12346589,12346590,12346591,12346592,12346593,12346594,12346595,12346596,12346597,12346598,12346599,12346600,12346601,12346602,12346603,12346604,12346605,12346606,12346607,12346608,12346609,12346610,12346611,12346612,12346613,12346614,12346615,12346616,12346617,12346618,12346619,12346620,12346621,12346622,12346623,12346624,12346625,12346626,12346627,12346628,12346629,12346630,12346631,12346632,12346633,12346634,12346635,12346636,12346637,12346638,12346639,12346640,12346641,12346642,12346643,12346644,12346645,12346646,12346647,12346648,12346649,12346650,12346651,12346652,12346653,12346654,12346655,12346656,12346657,12346658,12346659,12346660,12346661,12346662,12346663,12346664,12346665,12346666,12346667,12346668,12346669,12346670,12346671,12346672,12346673,12346674,12346675,12346676,12346677,12346678}
+(1 row)
+
+declare held_portal cursor with hold for select * from toasted_data;
+commit;
+drop table toasted_data;
+fetch all in held_portal;
+ f1
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {12345678,12345679,12345680,12345681,12345682,12345683,12345684,12345685,12345686,12345687,12345688,12345689,12345690,12345691,12345692,12345693,12345694,12345695,12345696,12345697,12345698,12345699,12345700,12345701,12345702,12345703,12345704,12345705,12345706,12345707,12345708,12345709,12345710,12345711,12345712,12345713,12345714,12345715,12345716,12345717,12345718,12345719,12345720,12345721,12345722,12345723,12345724,12345725,12345726,12345727,12345728,12345729,12345730,12345731,12345732,12345733,12345734,12345735,12345736,12345737,12345738,12345739,12345740,12345741,12345742,12345743,12345744,12345745,12345746,12345747,12345748,12345749,12345750,12345751,12345752,12345753,12345754,12345755,12345756,12345757,12345758,12345759,12345760,12345761,12345762,12345763,12345764,12345765,12345766,12345767,12345768,12345769,12345770,12345771,12345772,12345773,12345774,12345775,12345776,12345777,12345778,12345779,12345780,12345781,12345782,12345783,12345784,12345785,12345786,12345787,12345788,12345789,12345790,12345791,12345792,12345793,12345794,12345795,12345796,12345797,12345798,12345799,12345800,12345801,12345802,12345803,12345804,12345805,12345806,12345807,12345808,12345809,12345810,12345811,12345812,12345813,12345814,12345815,12345816,12345817,12345818,12345819,12345820,12345821,12345822,12345823,12345824,12345825,12345826,12345827,12345828,12345829,12345830,12345831,12345832,12345833,12345834,12345835,12345836,12345837,12345838,12345839,12345840,12345841,12345842,12345843,12345844,12345845,12345846,12345847,12345848,12345849,12345850,12345851,12345852,12345853,12345854,12345855,12345856,12345857,12345858,12345859,12345860,12345861,12345862,12345863,12345864,12345865,12345866,12345867,12345868,12345869,12345870,12345871,12345872,12345873,12345874,12345875,12345876,12345877,12345878,12345879,12345880,12345881,12345882,12345883,12345884,12345885,12345886,12345887,12345888,12345889,12345890,12345891,12345892,12345893,12345894,12345895,12345896,12345897,12345898,12345899
,12345900,12345901,12345902,12345903,12345904,12345905,12345906,12345907,12345908,12345909,12345910,12345911,12345912,12345913,12345914,12345915,12345916,12345917,12345918,12345919,12345920,12345921,12345922,12345923,12345924,12345925,12345926,12345927,12345928,12345929,12345930,12345931,12345932,12345933,12345934,12345935,12345936,12345937,12345938,12345939,12345940,12345941,12345942,12345943,12345944,12345945,12345946,12345947,12345948,12345949,12345950,12345951,12345952,12345953,12345954,12345955,12345956,12345957,12345958,12345959,12345960,12345961,12345962,12345963,12345964,12345965,12345966,12345967,12345968,12345969,12345970,12345971,12345972,12345973,12345974,12345975,12345976,12345977,12345978,12345979,12345980,12345981,12345982,12345983,12345984,12345985,12345986,12345987,12345988,12345989,12345990,12345991,12345992,12345993,12345994,12345995,12345996,12345997,12345998,12345999,12346000,12346001,12346002,12346003,12346004,12346005,12346006,12346007,12346008,12346009,12346010,12346011,12346012,12346013,12346014,12346015,12346016,12346017,12346018,12346019,12346020,12346021,12346022,12346023,12346024,12346025,12346026,12346027,12346028,12346029,12346030,12346031,12346032,12346033,12346034,12346035,12346036,12346037,12346038,12346039,12346040,12346041,12346042,12346043,12346044,12346045,12346046,12346047,12346048,12346049,12346050,12346051,12346052,12346053,12346054,12346055,12346056,12346057,12346058,12346059,12346060,12346061,12346062,12346063,12346064,12346065,12346066,12346067,12346068,12346069,12346070,12346071,12346072,12346073,12346074,12346075,12346076,12346077,12346078,12346079,12346080,12346081,12346082,12346083,12346084,12346085,12346086,12346087,12346088,12346089,12346090,12346091,12346092,12346093,12346094,12346095,12346096,12346097,12346098,12346099,12346100,12346101,12346102,12346103,12346104,12346105,12346106,12346107,12346108,12346109,12346110,12346111,12346112,12346113,12346114,12346115,12346116,12346117,12346118,12346119,12346120,12346121,1
2346122,12346123,12346124,12346125,12346126,12346127,12346128,12346129,12346130,12346131,12346132,12346133,12346134,12346135,12346136,12346137,12346138,12346139,12346140,12346141,12346142,12346143,12346144,12346145,12346146,12346147,12346148,12346149,12346150,12346151,12346152,12346153,12346154,12346155,12346156,12346157,12346158,12346159,12346160,12346161,12346162,12346163,12346164,12346165,12346166,12346167,12346168,12346169,12346170,12346171,12346172,12346173,12346174,12346175,12346176,12346177,12346178,12346179,12346180,12346181,12346182,12346183,12346184,12346185,12346186,12346187,12346188,12346189,12346190,12346191,12346192,12346193,12346194,12346195,12346196,12346197,12346198,12346199,12346200,12346201,12346202,12346203,12346204,12346205,12346206,12346207,12346208,12346209,12346210,12346211,12346212,12346213,12346214,12346215,12346216,12346217,12346218,12346219,12346220,12346221,12346222,12346223,12346224,12346225,12346226,12346227,12346228,12346229,12346230,12346231,12346232,12346233,12346234,12346235,12346236,12346237,12346238,12346239,12346240,12346241,12346242,12346243,12346244,12346245,12346246,12346247,12346248,12346249,12346250,12346251,12346252,12346253,12346254,12346255,12346256,12346257,12346258,12346259,12346260,12346261,12346262,12346263,12346264,12346265,12346266,12346267,12346268,12346269,12346270,12346271,12346272,12346273,12346274,12346275,12346276,12346277,12346278,12346279,12346280,12346281,12346282,12346283,12346284,12346285,12346286,12346287,12346288,12346289,12346290,12346291,12346292,12346293,12346294,12346295,12346296,12346297,12346298,12346299,12346300,12346301,12346302,12346303,12346304,12346305,12346306,12346307,12346308,12346309,12346310,12346311,12346312,12346313,12346314,12346315,12346316,12346317,12346318,12346319,12346320,12346321,12346322,12346323,12346324,12346325,12346326,12346327,12346328,12346329,12346330,12346331,12346332,12346333,12346334,12346335,12346336,12346337,12346338,12346339,12346340,12346341,12346342,12346343,123
46344,12346345,12346346,12346347,12346348,12346349,12346350,12346351,12346352,12346353,12346354,12346355,12346356,12346357,12346358,12346359,12346360,12346361,12346362,12346363,12346364,12346365,12346366,12346367,12346368,12346369,12346370,12346371,12346372,12346373,12346374,12346375,12346376,12346377,12346378,12346379,12346380,12346381,12346382,12346383,12346384,12346385,12346386,12346387,12346388,12346389,12346390,12346391,12346392,12346393,12346394,12346395,12346396,12346397,12346398,12346399,12346400,12346401,12346402,12346403,12346404,12346405,12346406,12346407,12346408,12346409,12346410,12346411,12346412,12346413,12346414,12346415,12346416,12346417,12346418,12346419,12346420,12346421,12346422,12346423,12346424,12346425,12346426,12346427,12346428,12346429,12346430,12346431,12346432,12346433,12346434,12346435,12346436,12346437,12346438,12346439,12346440,12346441,12346442,12346443,12346444,12346445,12346446,12346447,12346448,12346449,12346450,12346451,12346452,12346453,12346454,12346455,12346456,12346457,12346458,12346459,12346460,12346461,12346462,12346463,12346464,12346465,12346466,12346467,12346468,12346469,12346470,12346471,12346472,12346473,12346474,12346475,12346476,12346477,12346478,12346479,12346480,12346481,12346482,12346483,12346484,12346485,12346486,12346487,12346488,12346489,12346490,12346491,12346492,12346493,12346494,12346495,12346496,12346497,12346498,12346499,12346500,12346501,12346502,12346503,12346504,12346505,12346506,12346507,12346508,12346509,12346510,12346511,12346512,12346513,12346514,12346515,12346516,12346517,12346518,12346519,12346520,12346521,12346522,12346523,12346524,12346525,12346526,12346527,12346528,12346529,12346530,12346531,12346532,12346533,12346534,12346535,12346536,12346537,12346538,12346539,12346540,12346541,12346542,12346543,12346544,12346545,12346546,12346547,12346548,12346549,12346550,12346551,12346552,12346553,12346554,12346555,12346556,12346557,12346558,12346559,12346560,12346561,12346562,12346563,12346564,12346565,12346
566,12346567,12346568,12346569,12346570,12346571,12346572,12346573,12346574,12346575,12346576,12346577,12346578,12346579,12346580,12346581,12346582,12346583,12346584,12346585,12346586,12346587,12346588,12346589,12346590,12346591,12346592,12346593,12346594,12346595,12346596,12346597,12346598,12346599,12346600,12346601,12346602,12346603,12346604,12346605,12346606,12346607,12346608,12346609,12346610,12346611,12346612,12346613,12346614,12346615,12346616,12346617,12346618,12346619,12346620,12346621,12346622,12346623,12346624,12346625,12346626,12346627,12346628,12346629,12346630,12346631,12346632,12346633,12346634,12346635,12346636,12346637,12346638,12346639,12346640,12346641,12346642,12346643,12346644,12346645,12346646,12346647,12346648,12346649,12346650,12346651,12346652,12346653,12346654,12346655,12346656,12346657,12346658,12346659,12346660,12346661,12346662,12346663,12346664,12346665,12346666,12346667,12346668,12346669,12346670,12346671,12346672,12346673,12346674,12346675,12346676,12346677,12346678}
+(1 row)
+
+reset default_toast_compression;
diff --git a/src/test/regress/expected/prepare_optimizer.out b/src/test/regress/expected/prepare_optimizer.out
index 001caefc7a1..eb0c1bd447d 100644
--- a/src/test/regress/expected/prepare_optimizer.out
+++ b/src/test/regress/expected/prepare_optimizer.out
@@ -2,9 +2,9 @@
-- of the pg_prepared_statements view as prepared statements are
-- created and removed.
SET optimizer_trace_fallback to on;
-SELECT name, statement, parameter_types FROM pg_prepared_statements;
- name | statement | parameter_types
-------+-----------+-----------------
+SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
+ name | statement | parameter_types | result_types
+------+-----------+-----------------+--------------
(0 rows)
PREPARE q1 AS SELECT 1 AS a;
@@ -14,10 +14,10 @@ EXECUTE q1;
1
(1 row)
-SELECT name, statement, parameter_types FROM pg_prepared_statements;
- name | statement | parameter_types
-------+------------------------------+-----------------
- q1 | PREPARE q1 AS SELECT 1 AS a; | {}
+SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
+ name | statement | parameter_types | result_types
+------+------------------------------+-----------------+--------------
+ q1 | PREPARE q1 AS SELECT 1 AS a; | {} | {integer}
(1 row)
-- should fail
@@ -33,26 +33,26 @@ EXECUTE q1;
(1 row)
PREPARE q2 AS SELECT 2 AS b;
-SELECT name, statement, parameter_types FROM pg_prepared_statements;
- name | statement | parameter_types
-------+------------------------------+-----------------
- q1 | PREPARE q1 AS SELECT 2; | {}
- q2 | PREPARE q2 AS SELECT 2 AS b; | {}
+SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
+ name | statement | parameter_types | result_types
+------+------------------------------+-----------------+--------------
+ q1 | PREPARE q1 AS SELECT 2; | {} | {integer}
+ q2 | PREPARE q2 AS SELECT 2 AS b; | {} | {integer}
(2 rows)
-- sql92 syntax
DEALLOCATE PREPARE q1;
-SELECT name, statement, parameter_types FROM pg_prepared_statements;
- name | statement | parameter_types
-------+------------------------------+-----------------
- q2 | PREPARE q2 AS SELECT 2 AS b; | {}
+SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
+ name | statement | parameter_types | result_types
+------+------------------------------+-----------------+--------------
+ q2 | PREPARE q2 AS SELECT 2 AS b; | {} | {integer}
(1 row)
DEALLOCATE PREPARE q2;
-- the view should return the empty set again
-SELECT name, statement, parameter_types FROM pg_prepared_statements;
- name | statement | parameter_types
-------+-----------+-----------------
+SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
+ name | statement | parameter_types | result_types
+------+-----------+-----------------+--------------
(0 rows)
-- parameterized queries
@@ -64,7 +64,7 @@ INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Queries on master-only tables
datname | datistemplate | datallowconn
----------+---------------+--------------
- postgres | t | t
+ postgres | f | t
(1 row)
PREPARE q3(text, int, float, boolean, smallint) AS
@@ -162,25 +162,30 @@ PREPARE q6 AS
SELECT * FROM tenk1 WHERE unique1 = $1 AND stringu1 = $2;
PREPARE q7(unknown) AS
SELECT * FROM road WHERE thepath = $1;
-SELECT name, statement, parameter_types FROM pg_prepared_statements
+-- DML statements
+PREPARE q8 AS
+ UPDATE tenk1 SET stringu1 = $2 WHERE unique1 = $1;
+SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements
ORDER BY name;
- name | statement | parameter_types
-------+------------------------------------------------------------------+----------------------------------------------------
- q2 | PREPARE q2(text) AS +| {text}
- | SELECT datname, datistemplate, datallowconn +|
- | FROM pg_database WHERE datname = $1; |
- q3 | PREPARE q3(text, int, float, boolean, smallint) AS +| {text,integer,"double precision",boolean,smallint}
- | SELECT * FROM tenk1 WHERE string4 = $1 AND (four = $2 OR+|
- | ten = $3::bigint OR true = $4 OR odd = $5::int) +|
- | ORDER BY unique1; |
- q5 | PREPARE q5(int, text) AS +| {integer,text}
- | SELECT * FROM tenk1 WHERE unique1 = $1 OR stringu1 = $2 +|
- | ORDER BY unique1; |
- q6 | PREPARE q6 AS +| {integer,name}
- | SELECT * FROM tenk1 WHERE unique1 = $1 AND stringu1 = $2; |
- q7 | PREPARE q7(unknown) AS +| {path}
- | SELECT * FROM road WHERE thepath = $1; |
-(5 rows)
+ name | statement | parameter_types | result_types
+------+------------------------------------------------------------------+----------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------
+ q2 | PREPARE q2(text) AS +| {text} | {name,boolean,boolean}
+ | SELECT datname, datistemplate, datallowconn +| |
+ | FROM pg_database WHERE datname = $1; | |
+ q3 | PREPARE q3(text, int, float, boolean, smallint) AS +| {text,integer,"double precision",boolean,smallint} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name}
+ | SELECT * FROM tenk1 WHERE string4 = $1 AND (four = $2 OR+| |
+ | ten = $3::bigint OR true = $4 OR odd = $5::int) +| |
+ | ORDER BY unique1; | |
+ q5 | PREPARE q5(int, text) AS +| {integer,text} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name}
+ | SELECT * FROM tenk1 WHERE unique1 = $1 OR stringu1 = $2 +| |
+ | ORDER BY unique1; | |
+ q6 | PREPARE q6 AS +| {integer,name} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name}
+ | SELECT * FROM tenk1 WHERE unique1 = $1 AND stringu1 = $2; | |
+ q7 | PREPARE q7(unknown) AS +| {path} | {text,path}
+ | SELECT * FROM road WHERE thepath = $1; | |
+ q8 | PREPARE q8 AS +| {integer,name} |
+ | UPDATE tenk1 SET stringu1 = $2 WHERE unique1 = $1; | |
+(6 rows)
-- test DEALLOCATE ALL;
DEALLOCATE ALL;
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index 7d4396eceb9..0fdf5753687 100644
--- a/src/test/regress/expected/privileges.out
+++ b/src/test/regress/expected/privileges.out
@@ -1949,32 +1949,7 @@ BEGIN
EXCEPTION WHEN OTHERS THEN
RETURN 2;
END$$;
-CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c;
--- start_ignore
--- GPDB_14_MERGE_FIXME: the following command will abort the sub-transaction
--- in a DDL. The problem is that aborting the sub-transaction will also erase
--- the `dispatch_oids` needed by the QEs. It's a rare case.
--- We don't support this case now
-CREATE UNIQUE INDEX ON sro_index_mv (c) WHERE unwanted_grant_nofail(1) > 0;
-ERROR: no pre-assigned OID for pg_class tuple "sro_index_mv_c_idx" (namespace:2200 keyOid1:0 keyOid2:0) (oid_dispatch.c:371)
--- end_ignore
-\c -
--- REFRESH MATERIALIZED VIEW CONCURRENTLY sro_index_mv;
-REFRESH MATERIALIZED VIEW sro_index_mv;
--- REFRESH MATERIALIZED VIEW CONCURRENTLY use of eval_const_expressions()
-SET SESSION AUTHORIZATION regress_sro_user;
-CREATE FUNCTION unwanted_grant_nofail(int) RETURNS int
- IMMUTABLE LANGUAGE plpgsql AS $$
-BEGIN
- PERFORM unwanted_grant();
- RAISE WARNING 'owned';
- RETURN 1;
-EXCEPTION WHEN OTHERS THEN
- RETURN 2;
-END$$;
-ERROR: function "unwanted_grant_nofail" already exists with same argument types
-CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c;
-ERROR: relation "sro_index_mv" already exists
+CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c DISTRIBUTED BY (c);
CREATE UNIQUE INDEX ON sro_index_mv (c) WHERE unwanted_grant_nofail(1) > 0;
\c -
REFRESH MATERIALIZED VIEW CONCURRENTLY sro_index_mv;
diff --git a/src/test/regress/expected/qp_gist_indexes2_optimizer.out b/src/test/regress/expected/qp_gist_indexes2_optimizer.out
index 902c3c7d5e2..55462256b50 100644
--- a/src/test/regress/expected/qp_gist_indexes2_optimizer.out
+++ b/src/test/regress/expected/qp_gist_indexes2_optimizer.out
@@ -24,8 +24,10 @@ language plpython3u;
CREATE TABLE GistTable1 ( id INTEGER, owner VARCHAR, description VARCHAR, property BOX, poli POLYGON, bullseye CIRCLE, v VARCHAR, t TEXT, f FLOAT, p POINT, c CIRCLE, filler VARCHAR DEFAULT 'This is here just to take up space so that we use more pages of data and sequential scans take a lot more time. Stones tinheads and mixers coming; we did it all on our own; this summer I hear the crunching; 11 dead in Ohio. Got right down to it; we were cutting us down; could have had fun but, no; left them face down dead on the ground. How can you listen when you know?'
)
DISTRIBUTED BY (id);
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set PropertyInfo_file :abs_srcdir '/data/PropertyInfo.txt'
COPY GistTable1 FROM
-'@abs_srcdir@/data/PropertyInfo.txt'
+:'PropertyInfo_file'
CSV
;
ANALYZE GistTable1;
@@ -752,7 +754,7 @@ CREATE TABLE GistTable1 ( id INTEGER, owner VARCHAR, description VARCHAR, proper
WITH (APPENDONLY=True, COMPRESSTYPE=ZLIB, COMPRESSLEVEL=1)
DISTRIBUTED BY (id);
COPY GistTable1 FROM
-'@abs_srcdir@/data/PropertyInfo.txt'
+:'PropertyInfo_file'
CSV
;
-- ----------------------------------------------------------------------
@@ -1377,7 +1379,7 @@ CREATE TABLE GistTable1 ( id INTEGER, owner VARCHAR, description VARCHAR, proper
WITH (APPENDONLY=True)
DISTRIBUTED BY (id);
COPY GistTable1 FROM
-'@abs_srcdir@/data/PropertyInfo.txt'
+:'PropertyInfo_file'
CSV
;
-- ----------------------------------------------------------------------
@@ -2002,7 +2004,7 @@ CREATE TABLE GistTable1 ( id INTEGER, owner VARCHAR, description VARCHAR, proper
WITH (APPENDONLY=True, ORIENTATION='column', COMPRESSTYPE=ZLIB, COMPRESSLEVEL=1)
DISTRIBUTED BY (id);
COPY GistTable1 FROM
-'@abs_srcdir@/data/PropertyInfo.txt'
+:'PropertyInfo_file'
CSV
;
-- ----------------------------------------------------------------------
@@ -2627,7 +2629,7 @@ CREATE TABLE GistTable1 ( id INTEGER, owner VARCHAR, description VARCHAR, proper
WITH (APPENDONLY=True, ORIENTATION='column')
DISTRIBUTED BY (id);
COPY GistTable1 FROM
-'@abs_srcdir@/data/PropertyInfo.txt'
+:'PropertyInfo_file'
CSV
;
-- ----------------------------------------------------------------------
diff --git a/src/test/regress/expected/qp_misc_jiras_optimizer.out b/src/test/regress/expected/qp_misc_jiras_optimizer.out
index ec761ebc3fc..084c4ddb34b 100644
--- a/src/test/regress/expected/qp_misc_jiras_optimizer.out
+++ b/src/test/regress/expected/qp_misc_jiras_optimizer.out
@@ -3404,7 +3404,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
insert into qp_misc_jiras.tbl7286_test select i%10, '2009/01/01'::date + (i || ' days')::interval from generate_series(0, 99999) i;
set gp_enable_agg_distinct=off;
set gp_enable_agg_distinct_pruning=off;
-set statement_mem='1000kB';
+set statement_mem='2000kB';
set optimizer_force_three_stage_scalar_dqa=off;
select count(distinct d) from qp_misc_jiras.tbl7286_test;
count
@@ -5544,6 +5544,8 @@ drop table qp_misc_jiras.tbl13491_aocol;
create table qp_misc_jiras.rules (a integer);
create rule "_RETURN" as on select to qp_misc_jiras.rules do instead
select * from generate_series(1,5) x(a);
+DETAIL: This operation is not supported for tables.
+ERROR: relation "rules" cannot have ON SELECT rules
--
-- Test gp_enable_relsize_collection's effect on ORCA plan generation
--
diff --git a/src/test/regress/expected/qp_orca_fallback_optimizer.out b/src/test/regress/expected/qp_orca_fallback_optimizer.out
index ad2eeabd1e5..476c12c31a4 100644
--- a/src/test/regress/expected/qp_orca_fallback_optimizer.out
+++ b/src/test/regress/expected/qp_orca_fallback_optimizer.out
@@ -36,7 +36,7 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
--------------------------------------------------------------------------------------------------
Update on constr_tab (cost=0.00..1.03 rows=1 width=22)
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3) (cost=0.00..1.03 rows=1 width=22)
- -> Split (cost=0.00..1.03 rows=1 width=22)
+ -> Split Update (cost=0.00..1.03 rows=1 width=22)
-> Seq Scan on constr_tab (cost=0.00..1.01 rows=1 width=22)
Optimizer: Postgres query optimizer
(5 rows)
@@ -72,7 +72,7 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
--------------------------------------------------------------------------------------------------
Update on constr_tab (cost=0.00..1.03 rows=1 width=22)
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3) (cost=0.00..1.03 rows=1 width=22)
- -> Split (cost=0.00..1.03 rows=1 width=22)
+ -> Split Update (cost=0.00..1.03 rows=1 width=22)
-> Seq Scan on constr_tab (cost=0.00..1.01 rows=1 width=22)
Optimizer: Postgres query optimizer
(5 rows)
@@ -93,7 +93,7 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
--------------------------------------------------------------------------------------------------
Update on constr_tab (cost=0.00..1.03 rows=1 width=22)
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3) (cost=0.00..1.03 rows=1 width=22)
- -> Split (cost=0.00..1.03 rows=1 width=22)
+ -> Split Update (cost=0.00..1.03 rows=1 width=22)
-> Seq Scan on constr_tab (cost=0.00..1.01 rows=1 width=22)
Optimizer: Postgres query optimizer
(5 rows)
@@ -112,7 +112,7 @@ explain update constr_tab set a = 10;
-> Result (cost=0.00..431.00 rows=2 width=34)
-> Redistribute Motion 3:3 (slice1; segments: 3) (cost=0.00..431.00 rows=2 width=30)
Hash Key: constr_tab_1.a
- -> Split (cost=0.00..431.00 rows=1 width=30)
+ -> Split Update (cost=0.00..431.00 rows=1 width=30)
-> Seq Scan on constr_tab (cost=0.00..431.00 rows=1 width=26)
Optimizer: Pivotal Optimizer (GPORCA)
(9 rows)
diff --git a/src/test/regress/expected/qp_query_execution.out b/src/test/regress/expected/qp_query_execution.out
index 27b0c80ef4b..2c6f17dfaa4 100644
--- a/src/test/regress/expected/qp_query_execution.out
+++ b/src/test/regress/expected/qp_query_execution.out
@@ -160,10 +160,10 @@ insert into bar select i % 7, i % 6, i % 9, i || 'SOME NUMBER', i % 4 from gener
insert into bar select i % 7, i % 6, i % 9, i || 'SOME NUMBER', i % 4 from generate_series(1, 10000) i;
analyze foo_p;
analyze bar;
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6;', 'Nested Loop Left Join', 'Hash Right Join');
qx_count_operator
-------------------
- 0
+ 1
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6 order by 1, 2 desc limit 10;
@@ -343,10 +343,10 @@ select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t
186SN SN | 186
(10 rows)
-select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Nested Loop Left Join', 'Hash Right Join');
qx_count_operator
-------------------
- 0
+ 1
(1 row)
select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = '6SOME NUMBER' order by 1, 2 desc limit 10;
@@ -438,10 +438,10 @@ select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t i
36 | 36SOME NUMBER SOME NUMBER
(10 rows)
-select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = 6;', 'Hash Right Join', 'Hash Left Join');
+select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = 6;', 'Nested Loop Left Join', 'Hash Left Join');
qx_count_operator
-------------------
- 0
+ 1
(1 row)
select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = 6 order by 1, 2 asc limit 10;
@@ -486,10 +486,10 @@ select foo_p.k, foo_p.t from foo_p left outer join bar on foo_p.k = bar.k where
6.00 | 6SOME NUMBER SOME NUMBER
(10 rows)
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6.00;', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6.00;', 'Nested Loop Left Join', 'Hash Right Join');
qx_count_operator
-------------------
- 0
+ 1
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6.00 order by 1, 2 desc limit 10;
@@ -541,10 +541,10 @@ select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t
186SN SN | 186
(10 rows)
-select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Nested Loop Left Join', 'Hash Right Join');
qx_count_operator
-------------------
- 0
+ 1
(1 row)
select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = '6SOME NUMBER' order by 1, 2 asc limit 10;
@@ -597,10 +597,10 @@ select foo_p.k, foo_p.t from foo_p left outer join bar_p on foo_p.k = bar_p.k w
6 | 6SOME NUMBER SOME NUMBER
(10 rows)
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k where foo_p.t is not null and foo_p.a = 6;', 'Nested Loop Left Join', 'Hash Right Join');
qx_count_operator
-------------------
- 0
+ 1
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k where foo_p.t is not null and foo_p.a = 6 order by 1, 2 asc limit 10;
@@ -694,10 +694,10 @@ select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k a
6 | 6SOME NUMBER SOME NUMBER
(1 row)
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.a where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.a where foo_p.t is not null and foo_p.a = 6;', 'Nested Loop Left Join', 'Hash Right Join');
qx_count_operator
-------------------
- 0
+ 1
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.a where foo_p.t is not null and foo_p.a = 6 order by 1, 2 asc limit 10;
@@ -728,10 +728,10 @@ insert into bar select i % 7, i % 6, i % 9, i || 'SOME NUMBER', i % 4 from gener
insert into bar select i % 7, i % 6, i % 9, i || 'SOME NUMBER', i % 4 from generate_series(1, 10000) i;
analyze foo_p;
analyze bar;
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = (array[1])[1];', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = (array[1])[1];', 'Nested Loop Left Join', 'Hash Right Join');
qx_count_operator
-------------------
- 0
+ 1
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = (array[1])[1] order by 1, 2 desc limit 10;
@@ -750,10 +750,10 @@ select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where
(10 rows)
create function mytest(integer) returns integer as 'select $1/100' language sql;
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = mytest(100);', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = mytest(100);', 'Nested Loop Left Join', 'Hash Right Join');
qx_count_operator
-------------------
- 0
+ 1
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = mytest(100) order by 1, 2 asc limit 10;
diff --git a/src/test/regress/expected/qp_rowsecurity_optimizer.out b/src/test/regress/expected/qp_rowsecurity_optimizer.out
index 5f6b97d86e2..642d16e50d1 100644
--- a/src/test/regress/expected/qp_rowsecurity_optimizer.out
+++ b/src/test/regress/expected/qp_rowsecurity_optimizer.out
@@ -812,7 +812,7 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
---------------------------------------------------------------
Update on foo_rls
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
- -> Split
+ -> Split Update
-> Seq Scan on foo_rls
Filter: ((c >= 3) AND (c = 4))
Optimizer: Postgres-based planner
@@ -845,7 +845,7 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
Update on foo_part_rls_1_prt_4 foo_part_rls_4
Update on foo_part_rls_1_prt_5 foo_part_rls_5
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
- -> Split
+ -> Split Update
-> Append
-> Seq Scan on foo_part_rls_1_prt_1 foo_part_rls_1
Filter: ((c >= 500) AND (c = 800))
diff --git a/src/test/regress/expected/random.out b/src/test/regress/expected/random.out
index 223590720cc..a8e55b2e008 100644
--- a/src/test/regress/expected/random.out
+++ b/src/test/regress/expected/random.out
@@ -42,7 +42,7 @@ BEGIN
WITH samples AS (
SELECT random() r FROM generate_series(1, n) ORDER BY 1
), indexed_samples AS (
- SELECT (row_number() OVER())-1.0 i, r FROM samples
+ SELECT (row_number() OVER(ORDER BY r))-1.0 i, r FROM samples
)
SELECT max(abs(i/n-r)) < c / sqrt(n) FROM indexed_samples
);
@@ -101,7 +101,7 @@ BEGIN
WITH samples AS (
SELECT random_normal() r FROM generate_series(1, n) ORDER BY 1
), indexed_samples AS (
- SELECT (row_number() OVER())-1.0 i, r FROM samples
+ SELECT (row_number() OVER(ORDER BY r))-1.0 i, r FROM samples
)
SELECT max(abs((1+erf(r/sqrt(2)))/2 - i/n)) < c / sqrt(n)
FROM indexed_samples
diff --git a/src/test/regress/expected/rangefuncs_optimizer.out b/src/test/regress/expected/rangefuncs_optimizer.out
index c3a6718c265..471b80651c6 100644
--- a/src/test/regress/expected/rangefuncs_optimizer.out
+++ b/src/test/regress/expected/rangefuncs_optimizer.out
@@ -149,9 +149,9 @@ select * from vw_ord;
select definition from pg_views where viewname='vw_ord';
definition
----------------------------------------------------------------------------------------
- SELECT z.a, +
- z.b, +
- z.c +
+ SELECT a, +
+ b, +
+ c +
FROM UNNEST(ARRAY[10, 20], ARRAY['foo'::text, 'bar'::text], ARRAY[1.0]) z(a, b, c);
(1 row)
@@ -167,9 +167,9 @@ select * from vw_ord;
select definition from pg_views where viewname='vw_ord';
definition
----------------------------------------------------------------------------------------
- SELECT z.a, +
- z.b, +
- z.c +
+ SELECT a, +
+ b, +
+ c +
FROM UNNEST(ARRAY[10, 20], ARRAY['foo'::text, 'bar'::text], ARRAY[1.0]) z(a, b, c);
(1 row)
@@ -185,9 +185,9 @@ select * from vw_ord;
select definition from pg_views where viewname='vw_ord';
definition
----------------------------------------------------------------------------------------------------------------------
- SELECT z.a, +
- z.b, +
- z.c +
+ SELECT a, +
+ b, +
+ c +
FROM ROWS FROM(unnest(ARRAY[10, 20]), unnest(ARRAY['foo'::text, 'bar'::text]), generate_series(1, 2)) z(a, b, c);
(1 row)
@@ -610,14 +610,14 @@ select * from vw_rngfunc;
select pg_get_viewdef('vw_rngfunc');
pg_get_viewdef
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- SELECT t1.a, +
- t1.b, +
- t1.c, +
- t1.d, +
- t1.e, +
- t1.f, +
- t1.g, +
- t1.n +
+ SELECT a, +
+ b, +
+ c, +
+ d, +
+ e, +
+ f, +
+ g, +
+ n +
FROM ROWS FROM(getrngfunc9(1), getrngfunc7(1) AS (rngfuncid integer, rngfuncsubid integer, rngfuncname text), getrngfunc1(1)) WITH ORDINALITY t1(a, b, c, d, e, f, g, n);
(1 row)
@@ -1928,18 +1928,18 @@ create function testrngfunc() returns setof record as $$
insert into rngfunc values (1,2), (3,4) returning *;
$$ language sql;
select testrngfunc();
- testrngfunc
--------------
- (1,2)
- (3,4)
-(2 rows)
+ERROR: relation "rngfunc" does not exist
+LINE 2: insert into rngfunc values (1,2), (3,4) returning *;
+ ^
+QUERY:
+ insert into rngfunc values (1,2), (3,4) returning *;
select * from testrngfunc() as t(f1 int8,f2 int8);
- f1 | f2
-----+----
- 1 | 2
- 3 | 4
-(2 rows)
+ERROR: relation "rngfunc" does not exist
+LINE 2: insert into rngfunc values (1,2), (3,4) returning *;
+ ^
+QUERY:
+ insert into rngfunc values (1,2), (3,4) returning *;
select * from testrngfunc(); -- fail
ERROR: a column definition list is required for functions returning "record"
@@ -2234,15 +2234,44 @@ select * from usersview;
id2 | 2 | email2 | 12 | t | 11 | 2
(2 rows)
+alter table users drop column moredrop; -- fail, view has reference
+ERROR: cannot drop column moredrop of table users because other objects depend on it
+-- We used to have a bug that would allow the above to succeed, posing
+-- hazards for later execution of the view. Check that the internal
+-- defenses for those hazards haven't bit-rotted, in case some other
+-- bug with similar symptoms emerges.
begin;
-alter table users drop column moredrop;
+-- destroy the dependency entry that prevents the DROP:
+delete from pg_depend where
+ objid = (select oid from pg_rewrite
+ where ev_class = 'usersview'::regclass and rulename = '_RETURN')
+ and refobjsubid = 5
+returning pg_describe_object(classid, objid, objsubid) as obj,
+ pg_describe_object(refclassid, refobjid, refobjsubid) as ref,
+ deptype;
+ERROR: permission denied: "pg_depend" is a system catalog
+alter table users drop column moredrop cascade;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
select * from usersview; -- expect clean failure
-ERROR: attribute 5 of type record has been dropped
+ERROR: current transaction is aborted, commands ignored until end of transaction block
rollback;
-alter table users alter column seq type numeric;
+alter table users alter column seq type numeric; -- fail, view has reference
+ERROR: cannot alter type of a column used by a view or rule
+-- likewise, check we don't crash if the dependency goes wrong
+begin;
+-- destroy the dependency entry that prevents the ALTER:
+delete from pg_depend where
+ objid = (select oid from pg_rewrite
+ where ev_class = 'usersview'::regclass and rulename = '_RETURN')
+ and refobjsubid = 2
+returning pg_describe_object(classid, objid, objsubid) as obj,
+ pg_describe_object(refclassid, refobjid, refobjsubid) as ref,
+ deptype;
+ERROR: permission denied: "pg_depend" is a system catalog
+-- alter table users alter column seq type numeric;
select * from usersview; -- expect clean failure
-ERROR: attribute 2 of type record has wrong type
-DETAIL: Table has type numeric, but query expects integer.
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+rollback;
drop view usersview;
drop function get_first_user();
drop function get_users();
@@ -2515,3 +2544,16 @@ select * from
[{"id": "1"}] | 1
(1 row)
+-- check detection of mismatching record types with a const-folded expression
+with a(b) as (values (row(1,2,3)))
+select * from a, coalesce(b) as c(d int, e int); -- fail
+ERROR: function return row and query-specified return row do not match
+with a(b) as (values (row(1,2,3)))
+select * from a, coalesce(b) as c(d int, e int, f int, g int); -- fail
+ERROR: function return row and query-specified return row do not match
+with a(b) as (values (row(1,2,3)))
+select * from a, coalesce(b) as c(d int, e int, f float); -- fail
+ERROR: function return row and query-specified return row do not match
+select * from int8_tbl, coalesce(row(1)) as (a int, b int); -- fail
+ERROR: function return row and query-specified return row do not match
+
diff --git a/src/test/regress/expected/rangetypes_optimizer.out b/src/test/regress/expected/rangetypes_optimizer.out
index 3f0c81b34dc..65b38d385a7 100644
--- a/src/test/regress/expected/rangetypes_optimizer.out
+++ b/src/test/regress/expected/rangetypes_optimizer.out
@@ -1,13 +1,7 @@
-- Tests for range data types.
--- start_matchsubs
--- m/NOTICE: One or more columns in the following table\(s\) do not have statistics: /
--- s/.//gs
--- m/HINT: For non-partitioned tables, run analyze .+\. For partitioned tables, run analyze rootpartition .+\. See log for columns missing statistics\./
--- s/.//gs
--- end_matchsubs
-create type textrange as range (subtype=text, collation="C");
--
-- test input parser
+-- (type textrange was already made in test_setup.sql)
--
-- negative tests; should fail
select ''::textrange;
@@ -181,6 +175,73 @@ select '(a,a)'::textrange;
empty
(1 row)
+-- Also try it with non-error-throwing API
+select pg_input_is_valid('(1,4)', 'int4range');
+ pg_input_is_valid
+-------------------
+ t
+(1 row)
+
+select pg_input_is_valid('(1,4', 'int4range');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+select * from pg_input_error_info('(1,4', 'int4range');
+ message | detail | hint | sql_error_code
+---------------------------------+--------------------------+------+----------------
+ malformed range literal: "(1,4" | Unexpected end of input. | | 22P02
+(1 row)
+
+select pg_input_is_valid('(4,1)', 'int4range');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+select * from pg_input_error_info('(4,1)', 'int4range');
+ message | detail | hint | sql_error_code
+-------------------------------------------------------------------+--------+------+----------------
+ range lower bound must be less than or equal to range upper bound | | | 22000
+(1 row)
+
+select pg_input_is_valid('(4,zed)', 'int4range');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+select * from pg_input_error_info('(4,zed)', 'int4range');
+ message | detail | hint | sql_error_code
+----------------------------------------------+--------+------+----------------
+ invalid input syntax for type integer: "zed" | | | 22P02
+(1 row)
+
+select pg_input_is_valid('[1,2147483647]', 'int4range');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+select * from pg_input_error_info('[1,2147483647]', 'int4range');
+ message | detail | hint | sql_error_code
+----------------------+--------+------+----------------
+ integer out of range | | | 22003
+(1 row)
+
+select pg_input_is_valid('[2000-01-01,5874897-12-31]', 'daterange');
+ pg_input_is_valid
+-------------------
+ f
+(1 row)
+
+select * from pg_input_error_info('[2000-01-01,5874897-12-31]', 'daterange');
+ message | detail | hint | sql_error_code
+-------------------+--------+------+----------------
+ date out of range | | | 22008
+(1 row)
+
--
-- create some test data and test the operators
--
@@ -1494,12 +1555,11 @@ LINE 1: select '[2010-01-01 01:00:00 -08, 2010-01-01 02:00:00 -05)':...
set timezone to default;
--
-- Test user-defined range of floats
+-- (type float8range was already made in test_setup.sql)
--
--should fail
-create type float8range as range (subtype=float8, subtype_diff=float4mi);
+create type bogus_float8range as range (subtype=float8, subtype_diff=float4mi);
ERROR: function float4mi(double precision, double precision) does not exist
---should succeed
-create type float8range as range (subtype=float8, subtype_diff=float8mi);
select '[123.001, 5.e9)'::float8range @> 888.882::float8;
?column?
----------
@@ -1701,7 +1761,7 @@ reset enable_sort;
--
create type two_ints as (a int, b int);
create type two_ints_range as range (subtype = two_ints);
--- with force_parallel_mode on, this exercises tqueue.c's range remapping
+-- with debug_parallel_query on, this exercises tqueue.c's range remapping
select *, row_to_json(upper(t)) as u from
(values (two_ints_range(row(1,2), row(3,4))),
(two_ints_range(row(5,6), row(7,8)))) v(t);
diff --git a/src/test/regress/expected/rowsecurity_optimizer.out b/src/test/regress/expected/rowsecurity_optimizer.out
index 5717d5bf9a7..6cadc77c912 100644
--- a/src/test/regress/expected/rowsecurity_optimizer.out
+++ b/src/test/regress/expected/rowsecurity_optimizer.out
@@ -1674,11 +1674,11 @@ SET SESSION AUTHORIZATION regress_rls_alice;
CREATE TABLE s1 (a int, b text);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-INSERT INTO s1 (SELECT x, md5(x::text) FROM generate_series(-10,10) x);
+INSERT INTO s1 (SELECT x, public.fipshash(x::text) FROM generate_series(-10,10) x);
CREATE TABLE s2 (x int, y text);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-INSERT INTO s2 (SELECT x, md5(x::text) FROM generate_series(-6,6) x);
+INSERT INTO s2 (SELECT x, public.fipshash(x::text) FROM generate_series(-6,6) x);
GRANT SELECT ON s1, s2 TO regress_rls_bob;
CREATE POLICY p1 ON s1 USING (a in (select x from s2 where y like '%2f%'));
CREATE POLICY p2 ON s2 USING (x in (select a from s1 where b like '%22%'));
@@ -1698,13 +1698,11 @@ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM s1 WHERE f_leak(b); -- OK
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
-NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
- a | b
----+----------------------------------
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
-(2 rows)
+NOTICE: f_leak => 03b26944890929ff751653acb2f2af79
+ a | b
+----+----------------------------------
+ -6 | 03b26944890929ff751653acb2f2af79
+(1 row)
EXPLAIN (COSTS OFF) SELECT * FROM only s1 WHERE f_leak(b);
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
@@ -1727,13 +1725,11 @@ SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM s1 WHERE f_leak(b); -- OK
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
-NOTICE: f_leak => 0267aaf632e87a63288a08331f22c7c3
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
+NOTICE: f_leak => 03b26944890929ff751653acb2f2af79
a | b
----+----------------------------------
- -4 | 0267aaf632e87a63288a08331f22c7c3
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
-(2 rows)
+ -6 | 03b26944890929ff751653acb2f2af79
+(1 row)
EXPLAIN (COSTS OFF) SELECT * FROM s1 WHERE f_leak(b);
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
@@ -1755,10 +1751,8 @@ INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
xx | x | y
----+----+----------------------------------
- -6 | -6 | 596a3d04481816330f07e4f97510c28f
- -4 | -4 | 0267aaf632e87a63288a08331f22c7c3
- 2 | 2 | c81e728d9d4c2f636f067f89cc14862c
-(3 rows)
+ -4 | -4 | e5e0093f285a4fb94c3fcc2ad7fd04ed
+(1 row)
EXPLAIN (COSTS OFF) SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%';
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
@@ -2294,7 +2288,7 @@ SET SESSION AUTHORIZATION regress_rls_alice;
CREATE TABLE b1 (a int, b text);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-INSERT INTO b1 (SELECT x, md5(x::text) FROM generate_series(-10,10) x);
+INSERT INTO b1 (SELECT x, public.fipshash(x::text) FROM generate_series(-10,10) x);
CREATE POLICY p1 ON b1 USING (a % 2 = 0);
ALTER TABLE b1 ENABLE ROW LEVEL SECURITY;
GRANT ALL ON b1 TO regress_rls_bob;
@@ -2318,18 +2312,18 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
SELECT * FROM bv1 WHERE f_leak(b);
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: views with security_barrier ON
-NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
-NOTICE: f_leak => d3d9446802a44259755d38e6d163e820
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
-NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d
+NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00
+NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1
+NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02
+NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03
+NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f
a | b
----+----------------------------------
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 10 | d3d9446802a44259755d38e6d163e820
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 10 | 4a44dc15364204a80fe80e9039455cc1
(5 rows)
INSERT INTO bv1 VALUES (-1, 'xxx'); -- should fail view WCO
@@ -2357,7 +2351,7 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b);
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: View with WITH CHECK OPTION
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
+NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02
EXPLAIN (COSTS OFF) DELETE FROM bv1 WHERE a = 6 AND f_leak(b);
QUERY PLAN
-------------------------------------------------------------------------
@@ -2368,30 +2362,30 @@ EXPLAIN (COSTS OFF) DELETE FROM bv1 WHERE a = 6 AND f_leak(b);
(4 rows)
DELETE FROM bv1 WHERE a = 6 AND f_leak(b);
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
+NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f
SET SESSION AUTHORIZATION regress_rls_alice;
SELECT * FROM b1;
a | b
-----+----------------------------------
- -10 | 1b0fd9efa5279c4203b7c70233f86dbf
- -9 | 252e691406782824eec43d7eadc3d256
- -8 | a8d2ec85eaf98407310b72eb73dda247
- -7 | 74687a12d3915d3c4d83f1af7b3683d5
- -6 | 596a3d04481816330f07e4f97510c28f
- -5 | 47c1b025fa18ea96c33fbb6718688c0f
- -4 | 0267aaf632e87a63288a08331f22c7c3
- -3 | b3149ecea4628efd23d2f86e5a723472
- -2 | 5d7b9adcbe1c629ec722529dd12e5129
- -1 | 6bb61e3b7bce0931da574d19d1d82c88
- 0 | cfcd208495d565ef66e7dff9f98764da
- 1 | c4ca4238a0b923820dcc509a6f75849b
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3
- 5 | e4da3b7fbbce2345d7772b0674a318d5
- 7 | 8f14e45fceea167a5a36dedd4bea2543
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26
- 10 | d3d9446802a44259755d38e6d163e820
+ -10 | c171d4ec282b23db89a99880cd624e9b
+ -9 | d5c534fde62beb89c745a59952c8efed
+ -8 | e91592205d3881e3ea35d66973bb4898
+ -7 | a770d3270c9dcdedf12ed9fd70444f7c
+ -6 | 03b26944890929ff751653acb2f2af79
+ -5 | 37aa1ccf80e481832b2db282d4d4f895
+ -4 | e5e0093f285a4fb94c3fcc2ad7fd04ed
+ -3 | 615bdd17c2556f82f384392ea8557f8c
+ -2 | cf3bae39dd692048a8bf961182e6a34d
+ -1 | 1bad6b8cf97131fceab8543e81f77571
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 1 | 6b86b273ff34fce19d6b804eff5a3f57
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 3 | 4e07408562bedb8b60ce05c1decfe3ad
+ 5 | ef2d127de37b942baad06145e54b0c61
+ 7 | 7902699be42c8a8e46fbbb4501726517
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 9 | 19581e27de7ced00ff1ce50b2047e7a5
+ 10 | 4a44dc15364204a80fe80e9039455cc1
12 | xxx
4 | yyy
(21 rows)
@@ -2557,6 +2551,252 @@ INSERT INTO document VALUES (1, (SELECT cid from category WHERE cname = 'novel')
INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: View with WITH CHECK OPTION
ERROR: new row violates row-level security policy for table "document"
+--
+-- MERGE
+--
+RESET SESSION AUTHORIZATION;
+DROP POLICY p3_with_all ON document;
+ALTER TABLE document ADD COLUMN dnotes text DEFAULT '';
+-- all documents are readable
+CREATE POLICY p1 ON document FOR SELECT USING (true);
+-- one may insert documents only authored by them
+CREATE POLICY p2 ON document FOR INSERT WITH CHECK (dauthor = current_user);
+-- one may only update documents in 'novel' category and new dlevel must be > 0
+CREATE POLICY p3 ON document FOR UPDATE
+ USING (cid = (SELECT cid from category WHERE cname = 'novel'))
+ WITH CHECK (dlevel > 0);
+-- one may only delete documents in 'manga' category
+CREATE POLICY p4 ON document FOR DELETE
+ USING (cid = (SELECT cid from category WHERE cname = 'manga'));
+SELECT * FROM document;
+ did | cid | dlevel | dauthor | dtitle | dnotes
+-----+-----+--------+-------------------+----------------------------------+--------
+ 1 | 11 | 1 | regress_rls_bob | my first novel |
+ 2 | 11 | 2 | regress_rls_bob | my first novel |
+ 3 | 22 | 2 | regress_rls_bob | my science fiction |
+ 4 | 44 | 1 | regress_rls_bob | my first manga |
+ 5 | 44 | 2 | regress_rls_bob | my second manga |
+ 6 | 22 | 1 | regress_rls_carol | great science fiction |
+ 7 | 33 | 2 | regress_rls_carol | great technology book |
+ 8 | 44 | 1 | regress_rls_carol | great manga |
+ 9 | 22 | 1 | regress_rls_dave | awesome science fiction |
+ 10 | 33 | 2 | regress_rls_dave | awesome technology book |
+ 11 | 33 | 1 | regress_rls_carol | hoge |
+ 33 | 22 | 1 | regress_rls_bob | okay science fiction |
+ 78 | 33 | 1 | regress_rls_bob | some technology novel |
+ 79 | 33 | 1 | regress_rls_bob | technology book, can only insert |
+(14 rows)
+
+SET SESSION AUTHORIZATION regress_rls_bob;
+-- Fails, since update violates WITH CHECK qual on dlevel
+MERGE INTO document d
+USING (SELECT 1 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge1 ', dlevel = 0;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+ERROR: new row violates row-level security policy for table "document"
+-- Should be OK since USING and WITH CHECK quals pass
+MERGE INTO document d
+USING (SELECT 1 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge2 ';
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+-- Even when dlevel is updated explicitly, but to the existing value
+MERGE INTO document d
+USING (SELECT 1 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge3 ', dlevel = 1;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+-- There is a MATCH for did = 3, but UPDATE's USING qual does not allow
+-- updating an item in category 'science fiction'
+MERGE INTO document d
+USING (SELECT 3 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge ';
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+ERROR: target row violates row-level security policy (USING expression) for table "document"
+-- The same thing with DELETE action, but fails again because no permissions
+-- to delete items in 'science fiction' category that did 3 belongs to.
+MERGE INTO document d
+USING (SELECT 3 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ DELETE;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+ERROR: target row violates row-level security policy (USING expression) for table "document"
+-- Document with did 4 belongs to 'manga' category which is allowed for
+-- deletion. But this fails because the UPDATE action is matched first and
+-- UPDATE policy does not allow updation in the category.
+MERGE INTO document d
+USING (SELECT 4 as sdid) s
+ON did = s.sdid
+WHEN MATCHED AND dnotes = '' THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge '
+WHEN MATCHED THEN
+ DELETE;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+ERROR: target row violates row-level security policy (USING expression) for table "document"
+-- UPDATE action is not matched this time because of the WHEN qual.
+-- DELETE still fails because role regress_rls_bob does not have SELECT
+-- privileges on 'manga' category row in the category table.
+MERGE INTO document d
+USING (SELECT 4 as sdid) s
+ON did = s.sdid
+WHEN MATCHED AND dnotes <> '' THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge '
+WHEN MATCHED THEN
+ DELETE;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+ERROR: target row violates row-level security policy (USING expression) for table "document"
+-- OK if DELETE is replaced with DO NOTHING
+MERGE INTO document d
+USING (SELECT 4 as sdid) s
+ON did = s.sdid
+WHEN MATCHED AND dnotes <> '' THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge '
+WHEN MATCHED THEN
+ DO NOTHING;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+SELECT * FROM document WHERE did = 4;
+ did | cid | dlevel | dauthor | dtitle | dnotes
+-----+-----+--------+-----------------+----------------+--------
+ 4 | 44 | 1 | regress_rls_bob | my first manga |
+(1 row)
+
+-- Switch to regress_rls_carol role and try the DELETE again. It should succeed
+-- this time
+RESET SESSION AUTHORIZATION;
+SET SESSION AUTHORIZATION regress_rls_carol;
+MERGE INTO document d
+USING (SELECT 4 as sdid) s
+ON did = s.sdid
+WHEN MATCHED AND dnotes <> '' THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge '
+WHEN MATCHED THEN
+ DELETE;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+-- Switch back to regress_rls_bob role
+RESET SESSION AUTHORIZATION;
+SET SESSION AUTHORIZATION regress_rls_bob;
+-- Try INSERT action. This fails because we are trying to insert
+-- dauthor = regress_rls_dave and INSERT's WITH CHECK does not allow
+-- that
+MERGE INTO document d
+USING (SELECT 12 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ DELETE
+WHEN NOT MATCHED THEN
+ INSERT VALUES (12, 11, 1, 'regress_rls_dave', 'another novel');
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+ERROR: new row violates row-level security policy for table "document"
+-- This should be fine
+MERGE INTO document d
+USING (SELECT 12 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ DELETE
+WHEN NOT MATCHED THEN
+ INSERT VALUES (12, 11, 1, 'regress_rls_bob', 'another novel');
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+-- ok
+MERGE INTO document d
+USING (SELECT 1 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge4 '
+WHEN NOT MATCHED THEN
+ INSERT VALUES (12, 11, 1, 'regress_rls_bob', 'another novel');
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+-- drop and create a new SELECT policy which prevents us from reading
+-- any document except with category 'novel'
+RESET SESSION AUTHORIZATION;
+DROP POLICY p1 ON document;
+CREATE POLICY p1 ON document FOR SELECT
+ USING (cid = (SELECT cid from category WHERE cname = 'novel'));
+SET SESSION AUTHORIZATION regress_rls_bob;
+-- MERGE can no longer see the matching row and hence attempts the
+-- NOT MATCHED action, which results in unique key violation
+MERGE INTO document d
+USING (SELECT 7 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge5 '
+WHEN NOT MATCHED THEN
+ INSERT VALUES (12, 11, 1, 'regress_rls_bob', 'another novel');
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+ERROR: duplicate key value violates unique constraint "document_pkey"
+-- UPDATE action fails if new row is not visible
+MERGE INTO document d
+USING (SELECT 1 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge6 ',
+ cid = (SELECT cid from category WHERE cname = 'technology');
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+ERROR: new row violates row-level security policy for table "document"
+-- but OK if new row is visible
+MERGE INTO document d
+USING (SELECT 1 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge7 ',
+ cid = (SELECT cid from category WHERE cname = 'novel');
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+-- OK to insert a new row that is not visible
+MERGE INTO document d
+USING (SELECT 13 as sdid) s
+ON did = s.sdid
+WHEN MATCHED THEN
+ UPDATE SET dnotes = dnotes || ' notes added by merge8 '
+WHEN NOT MATCHED THEN
+ INSERT VALUES (13, 44, 1, 'regress_rls_bob', 'new manga');
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: MERGE command
+RESET SESSION AUTHORIZATION;
+-- drop the restrictive SELECT policy so that we can look at the
+-- final state of the table
+DROP POLICY p1 ON document;
+-- Just check everything went per plan
+SELECT * FROM document;
+ did | cid | dlevel | dauthor | dtitle | dnotes
+-----+-----+--------+-------------------+----------------------------------+----------------------------------------------------------------------------------------------
+ 1 | 11 | 1 | regress_rls_bob | my first novel | notes added by merge2 notes added by merge3 notes added by merge4 notes added by merge7
+ 2 | 11 | 2 | regress_rls_bob | my first novel |
+ 3 | 22 | 2 | regress_rls_bob | my science fiction |
+ 5 | 44 | 2 | regress_rls_bob | my second manga |
+ 6 | 22 | 1 | regress_rls_carol | great science fiction |
+ 7 | 33 | 2 | regress_rls_carol | great technology book |
+ 8 | 44 | 1 | regress_rls_carol | great manga |
+ 9 | 22 | 1 | regress_rls_dave | awesome science fiction |
+ 10 | 33 | 2 | regress_rls_dave | awesome technology book |
+ 11 | 33 | 1 | regress_rls_carol | hoge |
+ 12 | 11 | 1 | regress_rls_bob | another novel |
+ 13 | 44 | 1 | regress_rls_bob | new manga |
+ 33 | 22 | 1 | regress_rls_bob | okay science fiction |
+ 78 | 33 | 1 | regress_rls_bob | some technology novel |
+ 79 | 33 | 1 | regress_rls_bob | technology book, can only insert |
+(15 rows)
+
--
-- ROLE/GROUP
--
@@ -2929,6 +3169,7 @@ ERROR: permission denied for view rls_view
-- Query as role that is not the owner of the table or view with permissions.
SET SESSION AUTHORIZATION regress_rls_bob;
GRANT SELECT ON rls_view TO regress_rls_carol;
+SET SESSION AUTHORIZATION regress_rls_carol;
SELECT * FROM rls_view;
NOTICE: f_leak => bbb
NOTICE: f_leak => dad
@@ -2947,6 +3188,309 @@ EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
Optimizer: GPORCA
(4 rows)
+-- Policy requiring access to another table.
+SET SESSION AUTHORIZATION regress_rls_alice;
+CREATE TABLE z1_blacklist (a int);
+INSERT INTO z1_blacklist VALUES (3), (4);
+CREATE POLICY p3 ON z1 AS RESTRICTIVE USING (a NOT IN (SELECT a FROM z1_blacklist));
+-- Query as role that is not owner of table but is owner of view without permissions.
+SET SESSION AUTHORIZATION regress_rls_bob;
+SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+-- Query as role that is not the owner of the table or view without permissions.
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+-- Query as role that is not owner of table but is owner of view with permissions.
+SET SESSION AUTHORIZATION regress_rls_alice;
+GRANT SELECT ON z1_blacklist TO regress_rls_bob;
+SET SESSION AUTHORIZATION regress_rls_bob;
+SELECT * FROM rls_view;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+NOTICE: f_leak => bbb
+ a | b
+---+-----
+ 2 | bbb
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ QUERY PLAN
+-------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b))
+ SubPlan 1
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on z1_blacklist
+(7 rows)
+
+-- Query as role that is not the owner of the table or view with permissions.
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM rls_view;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+NOTICE: f_leak => bbb
+ a | b
+---+-----
+ 2 | bbb
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ QUERY PLAN
+-------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b))
+ SubPlan 1
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on z1_blacklist
+(7 rows)
+
+SET SESSION AUTHORIZATION regress_rls_alice;
+REVOKE SELECT ON z1_blacklist FROM regress_rls_bob;
+DROP POLICY p3 ON z1;
+SET SESSION AUTHORIZATION regress_rls_bob;
+DROP VIEW rls_view;
+--
+-- Security invoker views should follow policy for current user.
+--
+-- View and table owner are the same.
+SET SESSION AUTHORIZATION regress_rls_alice;
+CREATE VIEW rls_view WITH (security_invoker) AS
+ SELECT * FROM z1 WHERE f_leak(b);
+GRANT SELECT ON rls_view TO regress_rls_bob;
+GRANT SELECT ON rls_view TO regress_rls_carol;
+-- Query as table owner. Should return all records.
+SELECT * FROM rls_view;
+NOTICE: f_leak => aba
+NOTICE: f_leak => bbb
+NOTICE: f_leak => ccc
+NOTICE: f_leak => dad
+ a | b
+---+-----
+ 1 | aba
+ 2 | bbb
+ 3 | ccc
+ 4 | dad
+(4 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+ QUERY PLAN
+-------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: f_leak(b)
+(4 rows)
+
+-- Queries as other users.
+-- Should return records based on current user's policies.
+SET SESSION AUTHORIZATION regress_rls_bob;
+SELECT * FROM rls_view;
+NOTICE: f_leak => bbb
+NOTICE: f_leak => dad
+ a | b
+---+-----
+ 2 | bbb
+ 4 | dad
+(2 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+ QUERY PLAN
+-------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: (((a % 2) = 0) AND f_leak(b))
+(4 rows)
+
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM rls_view;
+NOTICE: f_leak => aba
+NOTICE: f_leak => ccc
+ a | b
+---+-----
+ 1 | aba
+ 3 | ccc
+(2 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+ QUERY PLAN
+-------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: (((a % 2) = 1) AND f_leak(b))
+(4 rows)
+
+-- View and table owners are different.
+SET SESSION AUTHORIZATION regress_rls_alice;
+DROP VIEW rls_view;
+SET SESSION AUTHORIZATION regress_rls_bob;
+CREATE VIEW rls_view WITH (security_invoker) AS
+ SELECT * FROM z1 WHERE f_leak(b);
+GRANT SELECT ON rls_view TO regress_rls_alice;
+GRANT SELECT ON rls_view TO regress_rls_carol;
+-- Query as table owner. Should return all records.
+SET SESSION AUTHORIZATION regress_rls_alice;
+SELECT * FROM rls_view;
+NOTICE: f_leak => aba
+NOTICE: f_leak => bbb
+NOTICE: f_leak => ccc
+NOTICE: f_leak => dad
+ a | b
+---+-----
+ 1 | aba
+ 2 | bbb
+ 3 | ccc
+ 4 | dad
+(4 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+ QUERY PLAN
+-------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: f_leak(b)
+(4 rows)
+
+-- Queries as other users.
+-- Should return records based on current user's policies.
+SET SESSION AUTHORIZATION regress_rls_bob;
+SELECT * FROM rls_view;
+NOTICE: f_leak => bbb
+NOTICE: f_leak => dad
+ a | b
+---+-----
+ 2 | bbb
+ 4 | dad
+(2 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+ QUERY PLAN
+-------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: (((a % 2) = 0) AND f_leak(b))
+(4 rows)
+
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM rls_view;
+NOTICE: f_leak => aba
+NOTICE: f_leak => ccc
+ a | b
+---+-----
+ 1 | aba
+ 3 | ccc
+(2 rows)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+ QUERY PLAN
+-------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: (((a % 2) = 1) AND f_leak(b))
+(4 rows)
+
+-- Policy requiring access to another table.
+SET SESSION AUTHORIZATION regress_rls_alice;
+CREATE POLICY p3 ON z1 AS RESTRICTIVE USING (a NOT IN (SELECT a FROM z1_blacklist));
+-- Query as role that is not owner of table but is owner of view without permissions.
+SET SESSION AUTHORIZATION regress_rls_bob;
+SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+-- Query as role that is not the owner of the table or view without permissions.
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+-- Query as role that is not owner of table but is owner of view with permissions.
+SET SESSION AUTHORIZATION regress_rls_alice;
+GRANT SELECT ON z1_blacklist TO regress_rls_bob;
+SET SESSION AUTHORIZATION regress_rls_bob;
+SELECT * FROM rls_view;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+NOTICE: f_leak => bbb
+ a | b
+---+-----
+ 2 | bbb
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ QUERY PLAN
+-------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b))
+ SubPlan 1
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on z1_blacklist
+(7 rows)
+
+-- Query as role that is not the owner of the table or view without permissions.
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied.
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ERROR: permission denied for table z1_blacklist
+-- Query as role that is not the owner of the table or view with permissions.
+SET SESSION AUTHORIZATION regress_rls_alice;
+GRANT SELECT ON z1_blacklist TO regress_rls_carol;
+SET SESSION AUTHORIZATION regress_rls_carol;
+SELECT * FROM rls_view;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+NOTICE: f_leak => aba
+ a | b
+---+-----
+ 1 | aba
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Query has row level security enabled and security quals contain sublinks
+ QUERY PLAN
+-------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Seq Scan on z1
+ Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 1) AND f_leak(b))
+ SubPlan 1
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on z1_blacklist
+(7 rows)
+
SET SESSION AUTHORIZATION regress_rls_bob;
DROP VIEW rls_view;
--
@@ -3126,42 +3670,42 @@ DROP VIEW rls_sbv;
-- Expression structure
--
SET SESSION AUTHORIZATION regress_rls_alice;
-INSERT INTO y2 (SELECT x, md5(x::text) FROM generate_series(0,20) x);
+INSERT INTO y2 (SELECT x, public.fipshash(x::text) FROM generate_series(0,20) x);
ANALYZE y2;
CREATE POLICY p2 ON y2 USING (a % 3 = 0);
CREATE POLICY p3 ON y2 USING (a % 4 = 0);
SET SESSION AUTHORIZATION regress_rls_bob;
SELECT * FROM y2 WHERE f_leak(b);
-NOTICE: f_leak => cfcd208495d565ef66e7dff9f98764da
-NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c
-NOTICE: f_leak => eccbc87e4b5ce2fe28308fd9f2a7baf3
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
-NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d
-NOTICE: f_leak => 45c48cce2e2d7fbdea1afc51c7c6ad26
-NOTICE: f_leak => d3d9446802a44259755d38e6d163e820
-NOTICE: f_leak => c20ad4d76fe97759aa27a0c99bff6710
-NOTICE: f_leak => aab3238922bcc25a6f606eb525ffdc56
-NOTICE: f_leak => 9bf31c7ff062936a96d3c8bd1f8f2ff3
-NOTICE: f_leak => c74d97b01eae257e44aa9d5bade97baf
-NOTICE: f_leak => 6f4922f45568161a8cdf4ad2299f6d23
-NOTICE: f_leak => 98f13708210194c475687be6106a3b84
+NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79
+NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03
+NOTICE: f_leak => 4e07408562bedb8b60ce05c1decfe3ad
+NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02
+NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f
+NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00
+NOTICE: f_leak => 19581e27de7ced00ff1ce50b2047e7a5
+NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1
+NOTICE: f_leak => 6b51d431df5d7f141cbececcf79edf3d
+NOTICE: f_leak => 8527a891e224136950ff32ca212b45bc
+NOTICE: f_leak => e629fa6598d732768f7c726b4b621285
+NOTICE: f_leak => b17ef6d19c7a5b1ee83b907c595526dc
+NOTICE: f_leak => 4ec9599fc203d176a301536c2e091a19
+NOTICE: f_leak => f5ca38f748a1d6eaf726b8a42fb575c3
a | b
----+----------------------------------
- 0 | cfcd208495d565ef66e7dff9f98764da
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26
- 10 | d3d9446802a44259755d38e6d163e820
- 12 | c20ad4d76fe97759aa27a0c99bff6710
- 14 | aab3238922bcc25a6f606eb525ffdc56
- 15 | 9bf31c7ff062936a96d3c8bd1f8f2ff3
- 16 | c74d97b01eae257e44aa9d5bade97baf
- 18 | 6f4922f45568161a8cdf4ad2299f6d23
- 20 | 98f13708210194c475687be6106a3b84
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 3 | 4e07408562bedb8b60ce05c1decfe3ad
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 9 | 19581e27de7ced00ff1ce50b2047e7a5
+ 10 | 4a44dc15364204a80fe80e9039455cc1
+ 12 | 6b51d431df5d7f141cbececcf79edf3d
+ 14 | 8527a891e224136950ff32ca212b45bc
+ 15 | e629fa6598d732768f7c726b4b621285
+ 16 | b17ef6d19c7a5b1ee83b907c595526dc
+ 18 | 4ec9599fc203d176a301536c2e091a19
+ 20 | f5ca38f748a1d6eaf726b8a42fb575c3
(14 rows)
EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak(b);
@@ -3200,20 +3744,20 @@ NOTICE: f_leak => abc
NOTICE: f_leak => abc
a | b
----+----------------------------------
- 0 | cfcd208495d565ef66e7dff9f98764da
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26
- 10 | d3d9446802a44259755d38e6d163e820
- 12 | c20ad4d76fe97759aa27a0c99bff6710
- 14 | aab3238922bcc25a6f606eb525ffdc56
- 15 | 9bf31c7ff062936a96d3c8bd1f8f2ff3
- 16 | c74d97b01eae257e44aa9d5bade97baf
- 18 | 6f4922f45568161a8cdf4ad2299f6d23
- 20 | 98f13708210194c475687be6106a3b84
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 3 | 4e07408562bedb8b60ce05c1decfe3ad
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 9 | 19581e27de7ced00ff1ce50b2047e7a5
+ 10 | 4a44dc15364204a80fe80e9039455cc1
+ 12 | 6b51d431df5d7f141cbececcf79edf3d
+ 14 | 8527a891e224136950ff32ca212b45bc
+ 15 | e629fa6598d732768f7c726b4b621285
+ 16 | b17ef6d19c7a5b1ee83b907c595526dc
+ 18 | 4ec9599fc203d176a301536c2e091a19
+ 20 | f5ca38f748a1d6eaf726b8a42fb575c3
(14 rows)
EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak('abc');
@@ -3255,20 +3799,20 @@ EXPLAIN (COSTS OFF) SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE
(10 rows)
SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(b);
-NOTICE: f_leak => cfcd208495d565ef66e7dff9f98764da
-NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c
-NOTICE: f_leak => eccbc87e4b5ce2fe28308fd9f2a7baf3
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
-NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d
-NOTICE: f_leak => 45c48cce2e2d7fbdea1afc51c7c6ad26
-NOTICE: f_leak => d3d9446802a44259755d38e6d163e820
-NOTICE: f_leak => c20ad4d76fe97759aa27a0c99bff6710
-NOTICE: f_leak => aab3238922bcc25a6f606eb525ffdc56
-NOTICE: f_leak => 9bf31c7ff062936a96d3c8bd1f8f2ff3
-NOTICE: f_leak => c74d97b01eae257e44aa9d5bade97baf
-NOTICE: f_leak => 6f4922f45568161a8cdf4ad2299f6d23
-NOTICE: f_leak => 98f13708210194c475687be6106a3b84
+NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79
+NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03
+NOTICE: f_leak => 4e07408562bedb8b60ce05c1decfe3ad
+NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02
+NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f
+NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00
+NOTICE: f_leak => 19581e27de7ced00ff1ce50b2047e7a5
+NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1
+NOTICE: f_leak => 6b51d431df5d7f141cbececcf79edf3d
+NOTICE: f_leak => 8527a891e224136950ff32ca212b45bc
+NOTICE: f_leak => e629fa6598d732768f7c726b4b621285
+NOTICE: f_leak => b17ef6d19c7a5b1ee83b907c595526dc
+NOTICE: f_leak => 4ec9599fc203d176a301536c2e091a19
+NOTICE: f_leak => f5ca38f748a1d6eaf726b8a42fb575c3
a | b | abc
---+---+-----
(0 rows)
@@ -3352,33 +3896,33 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
CREATE POLICY p1 ON t1 USING (a % 2 = 0);
ALTER TABLE t1 ENABLE ROW LEVEL SECURITY;
GRANT ALL ON t1 TO regress_rls_bob;
-INSERT INTO t1 (SELECT x, md5(x::text) FROM generate_series(0,20) x);
+INSERT INTO t1 (SELECT x, public.fipshash(x::text) FROM generate_series(0,20) x);
SET SESSION AUTHORIZATION regress_rls_bob;
WITH cte1 AS MATERIALIZED (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1;
-NOTICE: f_leak => cfcd208495d565ef66e7dff9f98764da
-NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c
-NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c
-NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc
-NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d
-NOTICE: f_leak => d3d9446802a44259755d38e6d163e820
-NOTICE: f_leak => c20ad4d76fe97759aa27a0c99bff6710
-NOTICE: f_leak => aab3238922bcc25a6f606eb525ffdc56
-NOTICE: f_leak => c74d97b01eae257e44aa9d5bade97baf
-NOTICE: f_leak => 6f4922f45568161a8cdf4ad2299f6d23
-NOTICE: f_leak => 98f13708210194c475687be6106a3b84
+NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79
+NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03
+NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02
+NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f
+NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00
+NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1
+NOTICE: f_leak => 6b51d431df5d7f141cbececcf79edf3d
+NOTICE: f_leak => 8527a891e224136950ff32ca212b45bc
+NOTICE: f_leak => b17ef6d19c7a5b1ee83b907c595526dc
+NOTICE: f_leak => 4ec9599fc203d176a301536c2e091a19
+NOTICE: f_leak => f5ca38f748a1d6eaf726b8a42fb575c3
a | b
----+----------------------------------
- 0 | cfcd208495d565ef66e7dff9f98764da
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 10 | d3d9446802a44259755d38e6d163e820
- 12 | c20ad4d76fe97759aa27a0c99bff6710
- 14 | aab3238922bcc25a6f606eb525ffdc56
- 16 | c74d97b01eae257e44aa9d5bade97baf
- 18 | 6f4922f45568161a8cdf4ad2299f6d23
- 20 | 98f13708210194c475687be6106a3b84
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 10 | 4a44dc15364204a80fe80e9039455cc1
+ 12 | 6b51d431df5d7f141cbececcf79edf3d
+ 14 | 8527a891e224136950ff32ca212b45bc
+ 16 | b17ef6d19c7a5b1ee83b907c595526dc
+ 18 | 4ec9599fc203d176a301536c2e091a19
+ 20 | f5ca38f748a1d6eaf726b8a42fb575c3
(11 rows)
EXPLAIN (COSTS OFF)
@@ -3403,17 +3947,17 @@ INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: View with WITH CHECK OPTION
a | b
----+----------------------------------
- 0 | cfcd208495d565ef66e7dff9f98764da
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 10 | d3d9446802a44259755d38e6d163e820
- 12 | c20ad4d76fe97759aa27a0c99bff6710
- 14 | aab3238922bcc25a6f606eb525ffdc56
- 16 | c74d97b01eae257e44aa9d5bade97baf
- 18 | 6f4922f45568161a8cdf4ad2299f6d23
- 20 | 98f13708210194c475687be6106a3b84
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 10 | 4a44dc15364204a80fe80e9039455cc1
+ 12 | 6b51d431df5d7f141cbececcf79edf3d
+ 14 | 8527a891e224136950ff32ca212b45bc
+ 16 | b17ef6d19c7a5b1ee83b907c595526dc
+ 18 | 4ec9599fc203d176a301536c2e091a19
+ 20 | f5ca38f748a1d6eaf726b8a42fb575c3
(11 rows)
WITH cte1 AS (INSERT INTO t1 VALUES (21, 'Fail') RETURNING *) SELECT * FROM cte1; --fail
@@ -3477,17 +4021,17 @@ EXPLAIN (COSTS OFF) INSERT INTO t2 (SELECT * FROM t1);
SELECT * FROM t2;
a | b
----+----------------------------------
- 0 | cfcd208495d565ef66e7dff9f98764da
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 10 | d3d9446802a44259755d38e6d163e820
- 12 | c20ad4d76fe97759aa27a0c99bff6710
- 14 | aab3238922bcc25a6f606eb525ffdc56
- 16 | c74d97b01eae257e44aa9d5bade97baf
- 18 | 6f4922f45568161a8cdf4ad2299f6d23
- 20 | 98f13708210194c475687be6106a3b84
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 10 | 4a44dc15364204a80fe80e9039455cc1
+ 12 | 6b51d431df5d7f141cbececcf79edf3d
+ 14 | 8527a891e224136950ff32ca212b45bc
+ 16 | b17ef6d19c7a5b1ee83b907c595526dc
+ 18 | 4ec9599fc203d176a301536c2e091a19
+ 20 | f5ca38f748a1d6eaf726b8a42fb575c3
20 | Success
(12 rows)
@@ -3504,17 +4048,17 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entr
SELECT * FROM t3;
a | b
----+----------------------------------
- 0 | cfcd208495d565ef66e7dff9f98764da
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 10 | d3d9446802a44259755d38e6d163e820
- 12 | c20ad4d76fe97759aa27a0c99bff6710
- 14 | aab3238922bcc25a6f606eb525ffdc56
- 16 | c74d97b01eae257e44aa9d5bade97baf
- 18 | 6f4922f45568161a8cdf4ad2299f6d23
- 20 | 98f13708210194c475687be6106a3b84
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 10 | 4a44dc15364204a80fe80e9039455cc1
+ 12 | 6b51d431df5d7f141cbececcf79edf3d
+ 14 | 8527a891e224136950ff32ca212b45bc
+ 16 | b17ef6d19c7a5b1ee83b907c595526dc
+ 18 | 4ec9599fc203d176a301536c2e091a19
+ 20 | f5ca38f748a1d6eaf726b8a42fb575c3
20 | Success
(12 rows)
@@ -3523,17 +4067,17 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entr
SELECT * FROM t4;
a | b
----+----------------------------------
- 0 | cfcd208495d565ef66e7dff9f98764da
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 10 | d3d9446802a44259755d38e6d163e820
- 12 | c20ad4d76fe97759aa27a0c99bff6710
- 14 | aab3238922bcc25a6f606eb525ffdc56
- 16 | c74d97b01eae257e44aa9d5bade97baf
- 18 | 6f4922f45568161a8cdf4ad2299f6d23
- 20 | 98f13708210194c475687be6106a3b84
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 10 | 4a44dc15364204a80fe80e9039455cc1
+ 12 | 6b51d431df5d7f141cbececcf79edf3d
+ 14 | 8527a891e224136950ff32ca212b45bc
+ 16 | b17ef6d19c7a5b1ee83b907c595526dc
+ 18 | 4ec9599fc203d176a301536c2e091a19
+ 20 | f5ca38f748a1d6eaf726b8a42fb575c3
20 | Success
(12 rows)
@@ -3610,27 +4154,27 @@ RESET SESSION AUTHORIZATION;
SELECT * FROM t1;
a | b
----+----------------------------------
- 1 | c4ca4238a0b923820dcc509a6f75849b
- 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3
- 5 | e4da3b7fbbce2345d7772b0674a318d5
- 7 | 8f14e45fceea167a5a36dedd4bea2543
- 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26
- 11 | 6512bd43d9caa6e02c990b0a82652dca
- 13 | c51ce410c124a10e0db5e4b97fc2af39
- 15 | 9bf31c7ff062936a96d3c8bd1f8f2ff3
- 17 | 70efdf2ec9b086079795c442636b55fb
- 19 | 1f0e3dad99908345f7439f8ffabdffc4
- 0 | cfcd208495d565ef66e7dff9f98764da
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 10 | d3d9446802a44259755d38e6d163e820
- 12 | c20ad4d76fe97759aa27a0c99bff6710
- 14 | aab3238922bcc25a6f606eb525ffdc56
- 16 | c74d97b01eae257e44aa9d5bade97baf
- 18 | 6f4922f45568161a8cdf4ad2299f6d23
- 20 | 98f13708210194c475687be6106a3b84
+ 1 | 6b86b273ff34fce19d6b804eff5a3f57
+ 3 | 4e07408562bedb8b60ce05c1decfe3ad
+ 5 | ef2d127de37b942baad06145e54b0c61
+ 7 | 7902699be42c8a8e46fbbb4501726517
+ 9 | 19581e27de7ced00ff1ce50b2047e7a5
+ 11 | 4fc82b26aecb47d2868c4efbe3581732
+ 13 | 3fdba35f04dc8c462986c992bcf87554
+ 15 | e629fa6598d732768f7c726b4b621285
+ 17 | 4523540f1504cd17100c4835e85b7eef
+ 19 | 9400f1b21cb527d7fa3d3eabba93557a
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 10 | 4a44dc15364204a80fe80e9039455cc1
+ 12 | 6b51d431df5d7f141cbececcf79edf3d
+ 14 | 8527a891e224136950ff32ca212b45bc
+ 16 | b17ef6d19c7a5b1ee83b907c595526dc
+ 18 | 4ec9599fc203d176a301536c2e091a19
+ 20 | f5ca38f748a1d6eaf726b8a42fb575c3
20 | Success
(22 rows)
@@ -3647,27 +4191,27 @@ SET SESSION AUTHORIZATION regress_rls_alice;
SELECT * FROM t1;
a | b
----+----------------------------------
- 1 | c4ca4238a0b923820dcc509a6f75849b
- 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3
- 5 | e4da3b7fbbce2345d7772b0674a318d5
- 7 | 8f14e45fceea167a5a36dedd4bea2543
- 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26
- 11 | 6512bd43d9caa6e02c990b0a82652dca
- 13 | c51ce410c124a10e0db5e4b97fc2af39
- 15 | 9bf31c7ff062936a96d3c8bd1f8f2ff3
- 17 | 70efdf2ec9b086079795c442636b55fb
- 19 | 1f0e3dad99908345f7439f8ffabdffc4
- 0 | cfcd208495d565ef66e7dff9f98764da
- 2 | c81e728d9d4c2f636f067f89cc14862c
- 4 | a87ff679a2f3e71d9181a67b7542122c
- 6 | 1679091c5a880faf6fb5e6087eb1b2dc
- 8 | c9f0f895fb98ab9159f51fd0297e236d
- 10 | d3d9446802a44259755d38e6d163e820
- 12 | c20ad4d76fe97759aa27a0c99bff6710
- 14 | aab3238922bcc25a6f606eb525ffdc56
- 16 | c74d97b01eae257e44aa9d5bade97baf
- 18 | 6f4922f45568161a8cdf4ad2299f6d23
- 20 | 98f13708210194c475687be6106a3b84
+ 1 | 6b86b273ff34fce19d6b804eff5a3f57
+ 3 | 4e07408562bedb8b60ce05c1decfe3ad
+ 5 | ef2d127de37b942baad06145e54b0c61
+ 7 | 7902699be42c8a8e46fbbb4501726517
+ 9 | 19581e27de7ced00ff1ce50b2047e7a5
+ 11 | 4fc82b26aecb47d2868c4efbe3581732
+ 13 | 3fdba35f04dc8c462986c992bcf87554
+ 15 | e629fa6598d732768f7c726b4b621285
+ 17 | 4523540f1504cd17100c4835e85b7eef
+ 19 | 9400f1b21cb527d7fa3d3eabba93557a
+ 0 | 5feceb66ffc86f38d952786c6d696c79
+ 2 | d4735e3a265e16eee03f59718b9b5d03
+ 4 | 4b227777d4dd1fc61c6f884f48641d02
+ 6 | e7f6c011776e8db7cd330b54174fd76f
+ 8 | 2c624232cdd221771294dfbb310aca00
+ 10 | 4a44dc15364204a80fe80e9039455cc1
+ 12 | 6b51d431df5d7f141cbececcf79edf3d
+ 14 | 8527a891e224136950ff32ca212b45bc
+ 16 | b17ef6d19c7a5b1ee83b907c595526dc
+ 18 | 4ec9599fc203d176a301536c2e091a19
+ 20 | f5ca38f748a1d6eaf726b8a42fb575c3
20 | Success
(22 rows)
@@ -3723,35 +4267,35 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
CREATE POLICY p1 ON copy_t USING (a % 2 = 0);
ALTER TABLE copy_t ENABLE ROW LEVEL SECURITY;
GRANT ALL ON copy_t TO regress_rls_bob, regress_rls_exempt_user;
-INSERT INTO copy_t (SELECT x, md5(x::text) FROM generate_series(0,10) x);
+INSERT INTO copy_t (SELECT x, public.fipshash(x::text) FROM generate_series(0,10) x);
-- Check COPY TO as Superuser/owner.
RESET SESSION AUTHORIZATION;
SET row_security TO OFF;
COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ',';
-0,cfcd208495d565ef66e7dff9f98764da
-1,c4ca4238a0b923820dcc509a6f75849b
-2,c81e728d9d4c2f636f067f89cc14862c
-3,eccbc87e4b5ce2fe28308fd9f2a7baf3
-4,a87ff679a2f3e71d9181a67b7542122c
-5,e4da3b7fbbce2345d7772b0674a318d5
-6,1679091c5a880faf6fb5e6087eb1b2dc
-7,8f14e45fceea167a5a36dedd4bea2543
-8,c9f0f895fb98ab9159f51fd0297e236d
-9,45c48cce2e2d7fbdea1afc51c7c6ad26
-10,d3d9446802a44259755d38e6d163e820
+0,5feceb66ffc86f38d952786c6d696c79
+1,6b86b273ff34fce19d6b804eff5a3f57
+2,d4735e3a265e16eee03f59718b9b5d03
+3,4e07408562bedb8b60ce05c1decfe3ad
+4,4b227777d4dd1fc61c6f884f48641d02
+5,ef2d127de37b942baad06145e54b0c61
+6,e7f6c011776e8db7cd330b54174fd76f
+7,7902699be42c8a8e46fbbb4501726517
+8,2c624232cdd221771294dfbb310aca00
+9,19581e27de7ced00ff1ce50b2047e7a5
+10,4a44dc15364204a80fe80e9039455cc1
SET row_security TO ON;
COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ',';
-0,cfcd208495d565ef66e7dff9f98764da
-1,c4ca4238a0b923820dcc509a6f75849b
-2,c81e728d9d4c2f636f067f89cc14862c
-3,eccbc87e4b5ce2fe28308fd9f2a7baf3
-4,a87ff679a2f3e71d9181a67b7542122c
-5,e4da3b7fbbce2345d7772b0674a318d5
-6,1679091c5a880faf6fb5e6087eb1b2dc
-7,8f14e45fceea167a5a36dedd4bea2543
-8,c9f0f895fb98ab9159f51fd0297e236d
-9,45c48cce2e2d7fbdea1afc51c7c6ad26
-10,d3d9446802a44259755d38e6d163e820
+0,5feceb66ffc86f38d952786c6d696c79
+1,6b86b273ff34fce19d6b804eff5a3f57
+2,d4735e3a265e16eee03f59718b9b5d03
+3,4e07408562bedb8b60ce05c1decfe3ad
+4,4b227777d4dd1fc61c6f884f48641d02
+5,ef2d127de37b942baad06145e54b0c61
+6,e7f6c011776e8db7cd330b54174fd76f
+7,7902699be42c8a8e46fbbb4501726517
+8,2c624232cdd221771294dfbb310aca00
+9,19581e27de7ced00ff1ce50b2047e7a5
+10,4a44dc15364204a80fe80e9039455cc1
-- Check COPY TO as user with permissions.
SET SESSION AUTHORIZATION regress_rls_bob;
SET row_security TO OFF;
@@ -3759,40 +4303,40 @@ COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail
ERROR: query would be affected by row-level security policy for table "copy_t"
SET row_security TO ON;
COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok
-0,cfcd208495d565ef66e7dff9f98764da
-2,c81e728d9d4c2f636f067f89cc14862c
-4,a87ff679a2f3e71d9181a67b7542122c
-6,1679091c5a880faf6fb5e6087eb1b2dc
-8,c9f0f895fb98ab9159f51fd0297e236d
-10,d3d9446802a44259755d38e6d163e820
+0,5feceb66ffc86f38d952786c6d696c79
+2,d4735e3a265e16eee03f59718b9b5d03
+4,4b227777d4dd1fc61c6f884f48641d02
+6,e7f6c011776e8db7cd330b54174fd76f
+8,2c624232cdd221771294dfbb310aca00
+10,4a44dc15364204a80fe80e9039455cc1
-- Check COPY TO as user with permissions and BYPASSRLS
SET SESSION AUTHORIZATION regress_rls_exempt_user;
SET row_security TO OFF;
COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok
-0,cfcd208495d565ef66e7dff9f98764da
-1,c4ca4238a0b923820dcc509a6f75849b
-2,c81e728d9d4c2f636f067f89cc14862c
-3,eccbc87e4b5ce2fe28308fd9f2a7baf3
-4,a87ff679a2f3e71d9181a67b7542122c
-5,e4da3b7fbbce2345d7772b0674a318d5
-6,1679091c5a880faf6fb5e6087eb1b2dc
-7,8f14e45fceea167a5a36dedd4bea2543
-8,c9f0f895fb98ab9159f51fd0297e236d
-9,45c48cce2e2d7fbdea1afc51c7c6ad26
-10,d3d9446802a44259755d38e6d163e820
+0,5feceb66ffc86f38d952786c6d696c79
+1,6b86b273ff34fce19d6b804eff5a3f57
+2,d4735e3a265e16eee03f59718b9b5d03
+3,4e07408562bedb8b60ce05c1decfe3ad
+4,4b227777d4dd1fc61c6f884f48641d02
+5,ef2d127de37b942baad06145e54b0c61
+6,e7f6c011776e8db7cd330b54174fd76f
+7,7902699be42c8a8e46fbbb4501726517
+8,2c624232cdd221771294dfbb310aca00
+9,19581e27de7ced00ff1ce50b2047e7a5
+10,4a44dc15364204a80fe80e9039455cc1
SET row_security TO ON;
COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok
-0,cfcd208495d565ef66e7dff9f98764da
-1,c4ca4238a0b923820dcc509a6f75849b
-2,c81e728d9d4c2f636f067f89cc14862c
-3,eccbc87e4b5ce2fe28308fd9f2a7baf3
-4,a87ff679a2f3e71d9181a67b7542122c
-5,e4da3b7fbbce2345d7772b0674a318d5
-6,1679091c5a880faf6fb5e6087eb1b2dc
-7,8f14e45fceea167a5a36dedd4bea2543
-8,c9f0f895fb98ab9159f51fd0297e236d
-9,45c48cce2e2d7fbdea1afc51c7c6ad26
-10,d3d9446802a44259755d38e6d163e820
+0,5feceb66ffc86f38d952786c6d696c79
+1,6b86b273ff34fce19d6b804eff5a3f57
+2,d4735e3a265e16eee03f59718b9b5d03
+3,4e07408562bedb8b60ce05c1decfe3ad
+4,4b227777d4dd1fc61c6f884f48641d02
+5,ef2d127de37b942baad06145e54b0c61
+6,e7f6c011776e8db7cd330b54174fd76f
+7,7902699be42c8a8e46fbbb4501726517
+8,2c624232cdd221771294dfbb310aca00
+9,19581e27de7ced00ff1ce50b2047e7a5
+10,4a44dc15364204a80fe80e9039455cc1
-- Check COPY TO as user without permissions. SET row_security TO OFF;
SET SESSION AUTHORIZATION regress_rls_carol;
SET row_security TO OFF;
@@ -3810,15 +4354,15 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
CREATE POLICY p1 ON copy_rel_to USING (a % 2 = 0);
ALTER TABLE copy_rel_to ENABLE ROW LEVEL SECURITY;
GRANT ALL ON copy_rel_to TO regress_rls_bob, regress_rls_exempt_user;
-INSERT INTO copy_rel_to VALUES (1, md5('1'));
+INSERT INTO copy_rel_to VALUES (1, public.fipshash('1'));
-- Check COPY TO as Superuser/owner.
RESET SESSION AUTHORIZATION;
SET row_security TO OFF;
COPY copy_rel_to TO STDOUT WITH DELIMITER ',';
-1,c4ca4238a0b923820dcc509a6f75849b
+1,6b86b273ff34fce19d6b804eff5a3f57
SET row_security TO ON;
COPY copy_rel_to TO STDOUT WITH DELIMITER ',';
-1,c4ca4238a0b923820dcc509a6f75849b
+1,6b86b273ff34fce19d6b804eff5a3f57
-- Check COPY TO as user with permissions.
SET SESSION AUTHORIZATION regress_rls_bob;
SET row_security TO OFF;
@@ -3830,10 +4374,49 @@ COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok
SET SESSION AUTHORIZATION regress_rls_exempt_user;
SET row_security TO OFF;
COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok
-1,c4ca4238a0b923820dcc509a6f75849b
+1,6b86b273ff34fce19d6b804eff5a3f57
SET row_security TO ON;
COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok
-1,c4ca4238a0b923820dcc509a6f75849b
+1,6b86b273ff34fce19d6b804eff5a3f57
+-- Check COPY TO as user without permissions. SET row_security TO OFF;
+SET SESSION AUTHORIZATION regress_rls_carol;
+SET row_security TO OFF;
+COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied
+ERROR: permission denied for table copy_rel_to
+SET row_security TO ON;
+COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied
+ERROR: permission denied for table copy_rel_to
+-- Check behavior with a child table.
+RESET SESSION AUTHORIZATION;
+SET row_security TO ON;
+CREATE TABLE copy_rel_to_child () INHERITS (copy_rel_to);
+INSERT INTO copy_rel_to_child VALUES (1, 'one'), (2, 'two');
+-- Check COPY TO as Superuser/owner.
+RESET SESSION AUTHORIZATION;
+SET row_security TO OFF;
+COPY copy_rel_to TO STDOUT WITH DELIMITER ',';
+1,6b86b273ff34fce19d6b804eff5a3f57
+SET row_security TO ON;
+COPY copy_rel_to TO STDOUT WITH DELIMITER ',';
+1,6b86b273ff34fce19d6b804eff5a3f57
+-- Check COPY TO as user with permissions.
+SET SESSION AUTHORIZATION regress_rls_bob;
+SET row_security TO OFF;
+COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS
+ERROR: query would be affected by row-level security policy for table "copy_rel_to"
+SET row_security TO ON;
+COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Inherited tables
+2,two
+-- Check COPY TO as user with permissions and BYPASSRLS
+SET SESSION AUTHORIZATION regress_rls_exempt_user;
+SET row_security TO OFF;
+COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok
+1,6b86b273ff34fce19d6b804eff5a3f57
+SET row_security TO ON;
+COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok
+1,6b86b273ff34fce19d6b804eff5a3f57
-- Check COPY TO as user without permissions. SET row_security TO OFF;
SET SESSION AUTHORIZATION regress_rls_carol;
SET row_security TO OFF;
@@ -3872,6 +4455,7 @@ ERROR: permission denied for table copy_t
RESET SESSION AUTHORIZATION;
DROP TABLE copy_t;
DROP TABLE copy_rel_to CASCADE;
+NOTICE: drop cascades to table copy_rel_to_child
-- Check WHERE CURRENT OF
SET SESSION AUTHORIZATION regress_rls_alice;
CREATE TABLE current_check (currentid int, payload text, rlsuser text);
@@ -4128,30 +4712,6 @@ DROP ROLE regress_rls_frank; -- succeeds
ROLLBACK TO q;
ROLLBACK; -- cleanup
--
--- Converting table to view
---
-BEGIN;
-CREATE TABLE t (c int);
-NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c' as the Greenplum Database data distribution key for this table.
-HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-CREATE POLICY p ON t USING (c % 2 = 1);
-ALTER TABLE t ENABLE ROW LEVEL SECURITY;
-SAVEPOINT q;
-CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD
- SELECT * FROM generate_series(1,5) t0(c); -- fails due to row-level security enabled
-ERROR: could not convert table "t" to a view because it has row security enabled
-ROLLBACK TO q;
-ALTER TABLE t DISABLE ROW LEVEL SECURITY;
-SAVEPOINT q;
-CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD
- SELECT * FROM generate_series(1,5) t0(c); -- fails due to policy p on t
-ERROR: could not convert table "t" to a view because it has row security policies
-ROLLBACK TO q;
-DROP POLICY p ON t;
-CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD
- SELECT * FROM generate_series(1,5) t0(c); -- succeeds
-ROLLBACK;
---
-- Policy expression handling
--
BEGIN;
@@ -4726,15 +5286,135 @@ DETAIL: Falling back to Postgres-based planner because GPORCA does not support
--------------+---
(0 rows)
+-- make sure RLS dependencies in CTEs are handled
+reset role;
+create or replace function rls_f() returns setof rls_t
+ stable language sql
+ as $$ with cte as (select * from rls_t) select * from cte $$;
+prepare r as select current_user, * from rls_f();
+set role regress_rls_alice;
+execute r;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+-------------------+------------------
+ regress_rls_alice | invisible to bob
+(1 row)
+
+set role regress_rls_bob;
+execute r;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+--------------+---
+(0 rows)
+
+-- make sure RLS dependencies in subqueries are handled
+reset role;
+create or replace function rls_f() returns setof rls_t
+ stable language sql
+ as $$ select * from (select * from rls_t) _ $$;
+prepare s as select current_user, * from rls_f();
+set role regress_rls_alice;
+execute s;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+-------------------+------------------
+ regress_rls_alice | invisible to bob
+(1 row)
+
+set role regress_rls_bob;
+execute s;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+--------------+---
+(0 rows)
+
+-- make sure RLS dependencies in sublinks are handled
+reset role;
+create or replace function rls_f() returns setof rls_t
+ stable language sql
+ as $$ select exists(select * from rls_t)::text $$;
+prepare t as select current_user, * from rls_f();
+set role regress_rls_alice;
+execute t;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+-------------------+------
+ regress_rls_alice | true
+(1 row)
+
+set role regress_rls_bob;
+execute t;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+-----------------+-------
+ regress_rls_bob | false
+(1 row)
+
+-- make sure RLS dependencies are handled when coercion projections are inserted
+reset role;
+create or replace function rls_f() returns setof rls_t
+ stable language sql
+ as $$ select * from (select array_agg(c) as cs from rls_t) _ group by cs $$;
+prepare u as select current_user, * from rls_f();
+set role regress_rls_alice;
+execute u;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+-------------------+----------------------
+ regress_rls_alice | {"invisible to bob"}
+(1 row)
+
+set role regress_rls_bob;
+execute u;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+-----------------+---
+ regress_rls_bob |
+(1 row)
+
+-- make sure RLS dependencies in security invoker views are handled
+reset role;
+create view rls_v with (security_invoker) as select * from rls_t;
+grant select on rls_v to regress_rls_alice, regress_rls_bob;
+create or replace function rls_f() returns setof rls_t
+ stable language sql
+ as $$ select * from rls_v $$;
+prepare v as select current_user, * from rls_f();
+set role regress_rls_alice;
+execute v;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+-------------------+------------------
+ regress_rls_alice | invisible to bob
+(1 row)
+
+set role regress_rls_bob;
+execute v;
+INFO: GPORCA failed to produce a plan, falling back to Postgres-based planner
+DETAIL: Falling back to Postgres-based planner because GPORCA does not support the following feature: Non-default collation
+ current_user | c
+--------------+---
+(0 rows)
+
RESET ROLE;
DROP FUNCTION rls_f();
+DROP VIEW rls_v;
DROP TABLE rls_t;
--
-- Clean up objects
--
RESET SESSION AUTHORIZATION;
DROP SCHEMA regress_rls_schema CASCADE;
-NOTICE: drop cascades to 29 other objects
+NOTICE: drop cascades to 30 other objects
DETAIL: drop cascades to function f_leak(text)
drop cascades to table uaccount
drop cascades to table category
@@ -4752,6 +5432,7 @@ drop cascades to table b1
drop cascades to view bv1
drop cascades to table z1
drop cascades to table z2
+drop cascades to table z1_blacklist
drop cascades to table x1
drop cascades to table y1
drop cascades to table y2
diff --git a/src/test/regress/expected/select_distinct_optimizer.out b/src/test/regress/expected/select_distinct_optimizer.out
index a8f221ec110..d6c3f9bf89d 100644
--- a/src/test/regress/expected/select_distinct_optimizer.out
+++ b/src/test/regress/expected/select_distinct_optimizer.out
@@ -186,6 +186,27 @@ SET jit_above_cost TO DEFAULT;
CREATE TABLE distinct_group_2 AS
SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entry.
+SET enable_seqscan = 0;
+-- Check to see we get an incremental sort plan
+EXPLAIN (costs off)
+SELECT DISTINCT hundred, two FROM tenk1;
+ QUERY PLAN
+-----------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> GroupAggregate
+ Group Key: hundred, two
+ -> Sort
+ Sort Key: hundred, two
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: hundred, two
+ -> GroupAggregate
+ Group Key: hundred, two
+ -> Sort
+ Sort Key: hundred, two
+ -> Seq Scan on tenk1
+(13 rows)
+
+RESET enable_seqscan;
SET enable_hashagg=TRUE;
SET optimizer_enable_hashagg=TRUE;
-- Produce results with hash aggregation.
@@ -229,6 +250,157 @@ DROP TABLE distinct_hash_1;
DROP TABLE distinct_hash_2;
DROP TABLE distinct_group_1;
DROP TABLE distinct_group_2;
+-- Test parallel DISTINCT
+SET parallel_tuple_cost=0;
+SET parallel_setup_cost=0;
+SET min_parallel_table_scan_size=0;
+SET max_parallel_workers_per_gather=2;
+-- Ensure we get a parallel plan
+EXPLAIN (costs off)
+SELECT DISTINCT four FROM tenk1;
+ QUERY PLAN
+-----------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> GroupAggregate
+ Group Key: four
+ -> Sort
+ Sort Key: four
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: four
+ -> Streaming HashAggregate
+ Group Key: four
+ -> Seq Scan on tenk1
+(11 rows)
+
+-- Ensure the parallel plan produces the correct results
+SELECT DISTINCT four FROM tenk1;
+ four
+------
+ 0
+ 1
+ 2
+ 3
+(4 rows)
+
+CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$
+ BEGIN
+ RETURN a;
+ END;
+$$ LANGUAGE plpgsql PARALLEL UNSAFE;
+-- Ensure we don't do parallel distinct with a parallel unsafe function
+EXPLAIN (COSTS OFF)
+SELECT DISTINCT distinct_func(1) FROM tenk1;
+ QUERY PLAN
+---------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> HashAggregate
+ Group Key: (distinct_func(1))
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: (distinct_func(1))
+ -> Seq Scan on tenk1
+(7 rows)
+
+-- make the function parallel safe
+CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$
+ BEGIN
+ RETURN a;
+ END;
+$$ LANGUAGE plpgsql PARALLEL SAFE;
+-- Ensure we do parallel distinct now that the function is parallel safe
+EXPLAIN (COSTS OFF)
+SELECT DISTINCT distinct_func(1) FROM tenk1;
+ QUERY PLAN
+---------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> HashAggregate
+ Group Key: (distinct_func(1))
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: (distinct_func(1))
+ -> Seq Scan on tenk1
+(7 rows)
+
+RESET max_parallel_workers_per_gather;
+RESET min_parallel_table_scan_size;
+RESET parallel_setup_cost;
+RESET parallel_tuple_cost;
+--
+-- Test the planner's ability to use a LIMIT 1 instead of a Unique node when
+-- all of the distinct_pathkeys have been marked as redundant
+--
+-- Ensure we get a plan with a Limit 1
+EXPLAIN (COSTS OFF)
+SELECT DISTINCT four FROM tenk1 WHERE four = 0;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> GroupAggregate
+ Group Key: four
+ -> Sort
+ Sort Key: four
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: four
+ -> Streaming HashAggregate
+ Group Key: four
+ -> Seq Scan on tenk1
+ Filter: (four = 0)
+(12 rows)
+
+-- Ensure the above gives us the correct result
+SELECT DISTINCT four FROM tenk1 WHERE four = 0;
+ four
+------
+ 0
+(1 row)
+
+-- Ensure we get a plan with a Limit 1
+EXPLAIN (COSTS OFF)
+SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0;
+ QUERY PLAN
+------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> GroupAggregate
+ Group Key: four
+ -> Sort
+ Sort Key: four
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: four
+ -> Streaming HashAggregate
+ Group Key: four
+ -> Seq Scan on tenk1
+ Filter: ((four = 0) AND (two <> 0))
+(12 rows)
+
+-- Ensure no rows are returned
+SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0;
+ four
+------
+(0 rows)
+
+-- Ensure we get a plan with a Limit 1 when the SELECT list contains constants
+EXPLAIN (COSTS OFF)
+SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> GroupAggregate
+ Group Key: four, (1), (2), (3)
+ -> Sort
+ Sort Key: four, (1), (2), (3)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: four, (1), (2), (3)
+ -> Streaming HashAggregate
+ Group Key: four, 1, 2, 3
+ -> Seq Scan on tenk1
+ Filter: (four = 0)
+(12 rows)
+
+-- Ensure we only get 1 row
+SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0;
+ four | ?column? | ?column? | ?column?
+------+----------+----------+----------
+ 0 | 1 | 2 | 3
+(1 row)
+
--
-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
-- very own regression file.
diff --git a/src/test/regress/expected/select_into_optimizer.out b/src/test/regress/expected/select_into_optimizer.out
index 083a617f302..4de1405da7c 100644
--- a/src/test/regress/expected/select_into_optimizer.out
+++ b/src/test/regress/expected/select_into_optimizer.out
@@ -202,9 +202,9 @@ DROP TABLE easi, easi2;
--
-- Disallowed uses of SELECT ... INTO. All should fail
--
-DECLARE foo CURSOR FOR SELECT 1 INTO b;
+DECLARE foo CURSOR FOR SELECT 1 INTO int4_tbl;
ERROR: SELECT ... INTO is not allowed here
-LINE 1: DECLARE foo CURSOR FOR SELECT 1 INTO b;
+LINE 1: DECLARE foo CURSOR FOR SELECT 1 INTO int4_tbl;
^
COPY (SELECT 1 INTO frak UNION SELECT 2) TO 'blob';
ERROR: COPY (SELECT INTO) is not supported
@@ -212,12 +212,12 @@ SELECT * FROM (SELECT 1 INTO f) bar;
ERROR: SELECT ... INTO is not allowed here
LINE 1: SELECT * FROM (SELECT 1 INTO f) bar;
^
-CREATE VIEW foo AS SELECT 1 INTO b;
+CREATE VIEW foo AS SELECT 1 INTO int4_tbl;
ERROR: views must not contain SELECT INTO
-INSERT INTO b SELECT 1 INTO f;
+INSERT INTO int4_tbl SELECT 1 INTO f;
ERROR: SELECT ... INTO is not allowed here
-LINE 1: INSERT INTO b SELECT 1 INTO f;
- ^
+LINE 1: INSERT INTO int4_tbl SELECT 1 INTO f;
+ ^
--
-- Empty target list
--
diff --git a/src/test/regress/expected/select_optimizer.out b/src/test/regress/expected/select_optimizer.out
index 9a3592bea03..e54e00f5430 100644
--- a/src/test/regress/expected/select_optimizer.out
+++ b/src/test/regress/expected/select_optimizer.out
@@ -293,10 +293,6 @@ RESET enable_seqscan;
RESET enable_bitmapscan;
RESET enable_sort;
RESET optimizer_enable_tablescan;
-SELECT two, stringu1, ten, string4
- INTO TABLE tmp
- FROM onek;
-NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entry.
--
-- awk '{print $1,$2;}' person.data |
-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data |
diff --git a/src/test/regress/expected/spgist_optimizer.out b/src/test/regress/expected/spgist_optimizer.out
index bc2c79503af..4a402d5fa5d 100644
--- a/src/test/regress/expected/spgist_optimizer.out
+++ b/src/test/regress/expected/spgist_optimizer.out
@@ -87,3 +87,16 @@ select * from spgist_domain_tbl where f1 = 'fo';
fo
(1 row)
+-- test an unlogged table, mostly to get coverage of spgistbuildempty
+create unlogged table spgist_unlogged_tbl(id serial, b box);
+ERROR: unlogged sequences are not supported
+create index spgist_unlogged_idx on spgist_unlogged_tbl using spgist (b);
+ERROR: relation "spgist_unlogged_tbl" does not exist
+insert into spgist_unlogged_tbl(b)
+select box(point(i,j))
+ from generate_series(1,100,5) i,
+ generate_series(1,10,5) j;
+ERROR: relation "spgist_unlogged_tbl" does not exist
+LINE 1: insert into spgist_unlogged_tbl(b)
+ ^
+-- leave this table around, to help in testing dump/restore
diff --git a/src/test/regress/expected/stats_ext_optimizer.out b/src/test/regress/expected/stats_ext_optimizer.out
index f62d399b4ca..27c8d00ab49 100644
--- a/src/test/regress/expected/stats_ext_optimizer.out
+++ b/src/test/regress/expected/stats_ext_optimizer.out
@@ -155,13 +155,12 @@ Statistics objects:
"public.ab1_a_b_stats" ON a, b FROM ab1; STATISTICS 0
ANALYZE ab1;
-SELECT stxname, stxdndistinct, stxddependencies, stxdmcv
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxname = 'ab1_a_b_stats'
- AND d.stxoid = s.oid;
- stxname | stxdndistinct | stxddependencies | stxdmcv
----------------+---------------+------------------+---------
- ab1_a_b_stats | | |
+SELECT stxname, stxdndistinct, stxddependencies, stxdmcv, stxdinherit
+ FROM pg_statistic_ext s LEFT JOIN pg_statistic_ext_data d ON (d.stxoid = s.oid)
+ WHERE s.stxname = 'ab1_a_b_stats';
+ stxname | stxdndistinct | stxddependencies | stxdmcv | stxdinherit
+---------------+---------------+------------------+---------+-------------
+ ab1_a_b_stats | | | |
(1 row)
ALTER STATISTICS ab1_a_b_stats SET STATISTICS -1;
@@ -218,12 +217,11 @@ SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b
CREATE STATISTICS stxdinh ON a, b FROM stxdinh;
VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2;
--- Since the stats object does not include inherited stats, it should not
--- affect the estimates
+-- See if the extended stats affect the estimates
SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2');
estimated | actual
-----------+--------
- 400 | 150
+ 150 | 150
(1 row)
-- Dependencies are applied at individual relations (within append), so
@@ -234,12 +232,25 @@ SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b
26 | 40
(1 row)
+-- Ensure correct (non-inherited) stats are applied to inherited query
+SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh GROUP BY 1, 2');
+ estimated | actual
+-----------+--------
+ 100 | 100
+(1 row)
+
+SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh WHERE a = 0 AND b = 0');
+ estimated | actual
+-----------+--------
+ 20 | 20
+(1 row)
+
DROP TABLE stxdinh, stxdinh1, stxdinh2;
-- Ensure inherited stats ARE applied to inherited query in partitioned table
CREATE TABLE stxdinp(i int, a int, b int) PARTITION BY RANGE (i);
CREATE TABLE stxdinp1 PARTITION OF stxdinp FOR VALUES FROM (1) TO (100);
INSERT INTO stxdinp SELECT 1, a/100, a/100 FROM generate_series(1, 999) a;
-CREATE STATISTICS stxdinp ON a, b FROM stxdinp;
+CREATE STATISTICS stxdinp ON (a + 1), a, b FROM stxdinp;
VACUUM ANALYZE stxdinp; -- partitions are processed recursively
SELECT 1 FROM pg_statistic_ext WHERE stxrelid = 'stxdinp'::regclass;
?column?
@@ -253,6 +264,12 @@ SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinp GROUP BY 1, 2');
10 | 10
(1 row)
+SELECT * FROM check_estimated_rows('SELECT a + 1, b FROM ONLY stxdinp GROUP BY 1, 2');
+ estimated | actual
+-----------+--------
+ 1 | 0
+(1 row)
+
DROP TABLE stxdinp;
-- basic test for statistics on expressions
CREATE TABLE ab1 (a INTEGER, b INTEGER, c TIMESTAMP, d TIMESTAMPTZ);
@@ -278,14 +295,23 @@ SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_3';
CREATE STATISTICS ab1_exprstat_4 ON date_trunc('day', d) FROM ab1;
-- date_trunc on timestamp is immutable
CREATE STATISTICS ab1_exprstat_5 ON date_trunc('day', c) FROM ab1;
+-- check use of a boolean-returning expression
+CREATE STATISTICS ab1_exprstat_6 ON
+ (case a when 1 then true else false end), b FROM ab1;
-- insert some data and run analyze, to test that these cases build properly
INSERT INTO ab1
-SELECT
- generate_series(1,10),
- generate_series(1,10),
- generate_series('2020-10-01'::timestamp, '2020-10-10'::timestamp, interval '1 day'),
- generate_series('2020-10-01'::timestamptz, '2020-10-10'::timestamptz, interval '1 day');
+SELECT x / 10, x / 3,
+ '2020-10-01'::timestamp + x * interval '1 day',
+ '2020-10-01'::timestamptz + x * interval '1 day'
+FROM generate_series(1, 100) x;
ANALYZE ab1;
+-- apply some stats
+SELECT * FROM check_estimated_rows('SELECT * FROM ab1 WHERE (case a when 1 then true else false end) AND b=2');
+ estimated | actual
+-----------+--------
+ 3 | 0
+(1 row)
+
DROP TABLE ab1;
-- Verify supported object types for extended statistics
CREATE schema tststats;
@@ -308,14 +334,18 @@ CREATE TABLE tststats.pt1 PARTITION OF tststats.pt FOR VALUES FROM (-10, -10) TO
NOTICE: table has parent, setting distribution columns to match parent table
CREATE STATISTICS tststats.s1 ON a, b FROM tststats.t;
CREATE STATISTICS tststats.s2 ON a, b FROM tststats.ti;
-ERROR: relation "ti" is not a table, foreign table, or materialized view
+ERROR: cannot define statistics for relation "ti"
+DETAIL: This operation is not supported for indexes.
CREATE STATISTICS tststats.s3 ON a, b FROM tststats.s;
-ERROR: relation "s" is not a table, foreign table, or materialized view
+ERROR: cannot define statistics for relation "s"
+DETAIL: This operation is not supported for sequences.
CREATE STATISTICS tststats.s4 ON a, b FROM tststats.v;
-ERROR: relation "v" is not a table, foreign table, or materialized view
+ERROR: cannot define statistics for relation "v"
+DETAIL: This operation is not supported for views.
CREATE STATISTICS tststats.s5 ON a, b FROM tststats.mv;
CREATE STATISTICS tststats.s6 ON a, b FROM tststats.ty;
-ERROR: relation "ty" is not a table, foreign table, or materialized view
+ERROR: cannot define statistics for relation "ty"
+DETAIL: This operation is not supported for composite types.
CREATE STATISTICS tststats.s7 ON a, b FROM tststats.f;
CREATE STATISTICS tststats.s8 ON a, b FROM tststats.pt;
CREATE STATISTICS tststats.s9 ON a, b FROM tststats.pt1;
@@ -1857,7 +1887,8 @@ CREATE TABLE mcv_lists (
b VARCHAR,
filler3 DATE,
c INT,
- d TEXT
+ d TEXT,
+ ia INT[]
)
WITH (autovacuum_enabled = off);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'filler1' as the Apache Cloudberry data distribution key for this table.
@@ -1929,8 +1960,9 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 A
-- 100 distinct combinations, all in the MCV list
TRUNCATE mcv_lists;
DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, filler1)
- SELECT mod(i,100), mod(i,50), mod(i,25), i FROM generate_series(1,5000) s(i);
+INSERT INTO mcv_lists (a, b, c, ia, filler1)
+ SELECT mod(i,100), mod(i,50), mod(i,25), array[mod(i,25)], i
+ FROM generate_series(1,5000) s(i);
ANALYZE mcv_lists;
SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
estimated | actual
@@ -2070,8 +2102,14 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY
22 | 100
(1 row)
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
+ estimated | actual
+-----------+--------
+ 72 | 50
+(1 row)
+
-- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists;
+CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c, ia FROM mcv_lists;
ANALYZE mcv_lists;
SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
estimated | actual
@@ -2217,6 +2255,12 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY
22 | 100
(1 row)
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
+ estimated | actual
+-----------+--------
+ 72 | 50
+(1 row)
+
-- check change of unrelated column type does not reset the MCV statistics
ALTER TABLE mcv_lists ALTER COLUMN d TYPE VARCHAR(64);
SELECT d.stxdmcv IS NOT NULL
@@ -2610,18 +2654,18 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
INSERT INTO mcv_lists_uuid (a, b, c)
SELECT
- md5(mod(i,100)::text)::uuid,
- md5(mod(i,50)::text)::uuid,
- md5(mod(i,25)::text)::uuid
+ fipshash(mod(i,100)::text)::uuid,
+ fipshash(mod(i,50)::text)::uuid,
+ fipshash(mod(i,25)::text)::uuid
FROM generate_series(1,5000) s(i);
ANALYZE mcv_lists_uuid;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc'' AND b = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc''');
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
estimated | actual
-----------+--------
2 | 50
(1 row)
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc'' AND b = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc'' AND c = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc''');
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
estimated | actual
-----------+--------
1 | 50
@@ -2630,13 +2674,13 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''167
CREATE STATISTICS mcv_lists_uuid_stats (mcv) ON a, b, c
FROM mcv_lists_uuid;
ANALYZE mcv_lists_uuid;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc'' AND b = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc''');
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
estimated | actual
-----------+--------
2 | 50
(1 row)
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc'' AND b = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc'' AND c = ''1679091c-5a88-0faf-6fb5-e6087eb1b2dc''');
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
estimated | actual
-----------+--------
1 | 50
@@ -2654,7 +2698,7 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
INSERT INTO mcv_lists_arrays (a, b, c)
SELECT
- ARRAY[md5((i/100)::text), md5((i/100-1)::text), md5((i/100+1)::text)],
+ ARRAY[fipshash((i/100)::text), fipshash((i/100-1)::text), fipshash((i/100+1)::text)],
ARRAY[(i/100-1)::numeric/1000, (i/100)::numeric/1000, (i/100+1)::numeric/1000],
ARRAY[(i/100-1), i/100, (i/100+1)]
FROM generate_series(1,5000) s(i);
@@ -3039,7 +3083,7 @@ SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b =
DROP TABLE expr_stats;
-- statistics on expressions with different data types
CREATE TABLE expr_stats (a int, b name, c text);
-INSERT INTO expr_stats SELECT mod(i,10), md5(mod(i,10)::text), md5(mod(i,10)::text) FROM generate_series(1,1000) s(i);
+INSERT INTO expr_stats SELECT mod(i,10), fipshash(mod(i,10)::text), fipshash(mod(i,10)::text) FROM generate_series(1,1000) s(i);
ANALYZE expr_stats;
SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0''');
estimated | actual
@@ -3094,11 +3138,11 @@ CREATE STATISTICS tststats.priv_test_stats (mcv) ON a, b
ANALYZE tststats.priv_test_tbl;
-- Check printing info about extended statistics by \dX
create table stts_t1 (a int, b int);
-create statistics stts_1 (ndistinct) on a, b from stts_t1;
-create statistics stts_2 (ndistinct, dependencies) on a, b from stts_t1;
-create statistics stts_3 (ndistinct, dependencies, mcv) on a, b from stts_t1;
+create statistics (ndistinct) on a, b from stts_t1;
+create statistics (ndistinct, dependencies) on a, b from stts_t1;
+create statistics (ndistinct, dependencies, mcv) on a, b from stts_t1;
create table stts_t2 (a int, b int, c int);
-create statistics stts_4 on b, c from stts_t2;
+create statistics on b, c from stts_t2;
create table stts_t3 (col1 int, col2 int, col3 int);
create statistics stts_hoge on col1, col2, col3 from stts_t3;
create schema stts_s1;
@@ -3116,24 +3160,24 @@ set search_path to public, stts_s1, stts_s2, tststats;
public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined
public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined
public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined
- public | stts_1 | a, b FROM stts_t1 | defined | |
- public | stts_2 | a, b FROM stts_t1 | defined | defined |
- public | stts_3 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_4 | b, c FROM stts_t2 | defined | defined | defined
public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
+ public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
+ public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
+ public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
+ public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined
stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined
tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined
(12 rows)
-\dX stts_?
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+--------+-------------------+-----------+--------------+---------
- public | stts_1 | a, b FROM stts_t1 | defined | |
- public | stts_2 | a, b FROM stts_t1 | defined | defined |
- public | stts_3 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_4 | b, c FROM stts_t2 | defined | defined | defined
+\dX stts_t*
+ List of extended statistics
+ Schema | Name | Definition | Ndistinct | Dependencies | MCV
+--------+-------------------+-------------------+-----------+--------------+---------
+ public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
+ public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
+ public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
+ public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
(4 rows)
\dX *stts_hoge
@@ -3151,24 +3195,24 @@ set search_path to public, stts_s1, stts_s2, tststats;
public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined
public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined
public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined
- public | stts_1 | a, b FROM stts_t1 | defined | |
- public | stts_2 | a, b FROM stts_t1 | defined | defined |
- public | stts_3 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_4 | b, c FROM stts_t2 | defined | defined | defined
public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
+ public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
+ public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
+ public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
+ public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined
stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined
tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined
(12 rows)
-\dX+ stts_?
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+--------+-------------------+-----------+--------------+---------
- public | stts_1 | a, b FROM stts_t1 | defined | |
- public | stts_2 | a, b FROM stts_t1 | defined | defined |
- public | stts_3 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_4 | b, c FROM stts_t2 | defined | defined | defined
+\dX+ stts_t*
+ List of extended statistics
+ Schema | Name | Definition | Ndistinct | Dependencies | MCV
+--------+-------------------+-------------------+-----------+--------------+---------
+ public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
+ public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
+ public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
+ public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
(4 rows)
\dX+ *stts_hoge
@@ -3185,6 +3229,21 @@ set search_path to public, stts_s1, stts_s2, tststats;
stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined
(1 row)
+create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1;
+create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1;
+create statistics (mcv) ON (a+b), (a-b) FROM stts_t1;
+\dX stts_t*expr*
+ List of extended statistics
+ Schema | Name | Definition | Ndistinct | Dependencies | MCV
+--------+-----------------------------+-------------------------------------+-----------+--------------+---------
+ public | stts_t1_a_b_expr_expr_stat | a, b, (a + b), (a - b) FROM stts_t1 | | | defined
+ public | stts_t1_a_b_expr_expr_stat1 | a, b, (a + b), (a - b) FROM stts_t1 | | | defined
+ public | stts_t1_expr_expr_stat | (a + b), (a - b) FROM stts_t1 | | | defined
+(3 rows)
+
+drop statistics stts_t1_a_b_expr_expr_stat;
+drop statistics stts_t1_a_b_expr_expr_stat1;
+drop statistics stts_t1_expr_expr_stat;
set search_path to public, stts_s1;
\dX
List of extended statistics
@@ -3194,11 +3253,11 @@ set search_path to public, stts_s1;
public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined
public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined
public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined
- public | stts_1 | a, b FROM stts_t1 | defined | |
- public | stts_2 | a, b FROM stts_t1 | defined | defined |
- public | stts_3 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_4 | b, c FROM stts_t2 | defined | defined | defined
public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
+ public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
+ public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
+ public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
+ public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined
(10 rows)
@@ -3212,11 +3271,11 @@ set role regress_stats_ext;
public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined
public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined
public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined
- public | stts_1 | a, b FROM stts_t1 | defined | |
- public | stts_2 | a, b FROM stts_t1 | defined | defined |
- public | stts_3 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_4 | b, c FROM stts_t2 | defined | defined | defined
public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
+ public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
+ public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
+ public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
+ public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
(9 rows)
reset role;
@@ -3231,6 +3290,10 @@ GRANT USAGE ON SCHEMA tststats TO regress_stats_user1;
SET SESSION AUTHORIZATION regress_stats_user1;
SELECT * FROM tststats.priv_test_tbl; -- Permission denied
ERROR: permission denied for table priv_test_tbl
+-- Check individual columns if we don't have table privilege
+SELECT * FROM tststats.priv_test_tbl
+ WHERE a = 1 and tststats.priv_test_tbl.* > (1, 1) is not null;
+ERROR: permission denied for table priv_test_tbl
-- Attempt to gain access using a leaky operator
CREATE FUNCTION op_leak(int, int) RETURNS bool
AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END'
@@ -3266,10 +3329,53 @@ SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not le
(0 rows)
DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
+-- privilege checks for pg_stats_ext and pg_stats_ext_exprs
+RESET SESSION AUTHORIZATION;
+CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT);
+INSERT INTO stats_ext_tbl (col) VALUES ('secret'), ('secret'), ('very secret');
+CREATE STATISTICS s_col ON id, col FROM stats_ext_tbl;
+CREATE STATISTICS s_expr ON mod(id, 2), lower(col) FROM stats_ext_tbl;
+ANALYZE stats_ext_tbl;
+-- unprivileged role should not have access
+SET SESSION AUTHORIZATION regress_stats_user1;
+SELECT statistics_name, most_common_vals FROM pg_stats_ext x
+ WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*);
+ statistics_name | most_common_vals
+-----------------+------------------
+(0 rows)
+
+SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x
+ WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*);
+ statistics_name | most_common_vals
+-----------------+------------------
+(0 rows)
+
+-- give unprivileged role ownership of table
+RESET SESSION AUTHORIZATION;
+ALTER TABLE stats_ext_tbl OWNER TO regress_stats_user1;
+-- unprivileged role should now have access
+SET SESSION AUTHORIZATION regress_stats_user1;
+SELECT statistics_name, most_common_vals FROM pg_stats_ext x
+ WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*);
+ statistics_name | most_common_vals
+-----------------+-------------------------------------------
+ s_col | {{1,secret},{2,secret},{3,"very secret"}}
+ s_expr | {{0,secret},{1,secret},{1,"very secret"}}
+(2 rows)
+
+SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x
+ WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*);
+ statistics_name | most_common_vals
+-----------------+------------------
+ s_expr | {1}
+ s_expr | {secret}
+(2 rows)
+
-- Tidy up
DROP OPERATOR <<< (int, int);
DROP FUNCTION op_leak(int, int);
RESET SESSION AUTHORIZATION;
+DROP TABLE stats_ext_tbl;
DROP SCHEMA tststats CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table tststats.priv_test_tbl
diff --git a/src/test/regress/expected/subselect_gp_optimizer.out b/src/test/regress/expected/subselect_gp_optimizer.out
index 72830e65e08..442a3daac1a 100644
--- a/src/test/regress/expected/subselect_gp_optimizer.out
+++ b/src/test/regress/expected/subselect_gp_optimizer.out
@@ -1916,20 +1916,22 @@ EXPLAIN select count(distinct ss.ten) from
-- we should see 2 subplans in the explain
--
EXPLAIN SELECT EXISTS(SELECT * FROM tenk1 WHERE tenk1.unique1 = tenk2.unique1) FROM tenk2 LIMIT 1;
- QUERY PLAN
-----------------------------------------------------------------------------------------------
- Limit (cost=0.00..865.45 rows=1 width=1)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..865.45 rows=1 width=1)
- -> Limit (cost=0.00..865.45 rows=1 width=1)
- -> Hash Left Join (cost=0.00..865.42 rows=3334 width=8)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------
+ Limit (cost=0.00..865.49 rows=1 width=1)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..865.49 rows=1 width=1)
+ -> Limit (cost=0.00..865.49 rows=1 width=1)
+ -> Hash Left Join (cost=0.00..865.46 rows=3334 width=8)
Hash Cond: (tenk2.unique1 = tenk1.unique1)
- -> Seq Scan on tenk2 (cost=0.00..431.51 rows=3334 width=4)
+ -> Redistribute Motion 3:3 (slice2; segments: 3) (cost=0.00..431.58 rows=3334 width=4)
+ Hash Key: tenk2.unique1
+ -> Seq Scan on tenk2 (cost=0.00..431.51 rows=3334 width=4)
-> Hash (cost=431.96..431.96 rows=3334 width=12)
-> HashAggregate (cost=0.00..431.96 rows=3334 width=12)
Group Key: tenk1.unique1
-> Seq Scan on tenk1 (cost=0.00..431.51 rows=3334 width=4)
- Optimizer: Pivotal Optimizer (GPORCA)
-(11 rows)
+ Optimizer: GPORCA
+(13 rows)
SELECT EXISTS(SELECT * FROM tenk1 WHERE tenk1.unique1 = tenk2.unique1) FROM tenk2 LIMIT 1;
exists
diff --git a/src/test/regress/expected/subselect_optimizer.out b/src/test/regress/expected/subselect_optimizer.out
index 5b20a89461a..7dee1bd8aac 100644
--- a/src/test/regress/expected/subselect_optimizer.out
+++ b/src/test/regress/expected/subselect_optimizer.out
@@ -30,6 +30,12 @@ SELECT * FROM ((SELECT 1 AS x)) ss;
1
(1 row)
+SELECT * FROM ((SELECT 1 AS x)), ((SELECT * FROM ((SELECT 2 AS y))));
+ x | y
+---+---
+ 1 | 2
+(1 row)
+
(SELECT 2) UNION SELECT 2;
?column?
----------
@@ -196,6 +202,69 @@ SELECT f1 AS "Correlated Field"
3
(5 rows)
+-- Subselects without aliases
+SELECT count FROM (SELECT COUNT(DISTINCT name) FROM road);
+ count
+-------
+ 2911
+(1 row)
+
+SELECT COUNT(*) FROM (SELECT DISTINCT name FROM road);
+ count
+-------
+ 2911
+(1 row)
+
+SELECT * FROM (SELECT * FROM int4_tbl), (VALUES (123456)) WHERE f1 = column1;
+ f1 | column1
+--------+---------
+ 123456 | 123456
+(1 row)
+
+CREATE VIEW view_unnamed_ss AS
+SELECT * FROM (SELECT * FROM (SELECT abs(f1) AS a1 FROM int4_tbl)),
+ (SELECT * FROM int8_tbl)
+ WHERE a1 < 10 AND q1 > a1 ORDER BY q1, q2;
+SELECT * FROM view_unnamed_ss;
+ a1 | q1 | q2
+----+------------------+-------------------
+ 0 | 123 | 456
+ 0 | 123 | 4567890123456789
+ 0 | 4567890123456789 | 123
+ 0 | 4567890123456789 | 4567890123456789
+ 0 | 4567890123456789 | -4567890123456789
+(5 rows)
+
+\sv view_unnamed_ss
+CREATE OR REPLACE VIEW public.view_unnamed_ss AS
+ SELECT unnamed_subquery.a1,
+ unnamed_subquery_1.q1,
+ unnamed_subquery_1.q2
+ FROM ( SELECT unnamed_subquery_2.a1
+ FROM ( SELECT abs(int4_tbl.f1) AS a1
+ FROM int4_tbl) unnamed_subquery_2) unnamed_subquery,
+ ( SELECT int8_tbl.q1,
+ int8_tbl.q2
+ FROM int8_tbl) unnamed_subquery_1
+ WHERE unnamed_subquery.a1 < 10 AND unnamed_subquery_1.q1 > unnamed_subquery.a1
+ ORDER BY unnamed_subquery_1.q1, unnamed_subquery_1.q2
+DROP VIEW view_unnamed_ss;
+-- Test matching of locking clause to correct alias
+CREATE VIEW view_unnamed_ss_locking AS
+SELECT * FROM (SELECT * FROM int4_tbl), int8_tbl AS unnamed_subquery
+ WHERE f1 = q1
+ FOR UPDATE OF unnamed_subquery;
+\sv view_unnamed_ss_locking
+CREATE OR REPLACE VIEW public.view_unnamed_ss_locking AS
+ SELECT unnamed_subquery.f1,
+ unnamed_subquery_1.q1,
+ unnamed_subquery_1.q2
+ FROM ( SELECT int4_tbl.f1
+ FROM int4_tbl) unnamed_subquery,
+ int8_tbl unnamed_subquery_1
+ WHERE unnamed_subquery.f1 = unnamed_subquery_1.q1
+ FOR UPDATE OF unnamed_subquery_1
+DROP VIEW view_unnamed_ss_locking;
--
-- Use some existing tables in the regression test
--
@@ -987,8 +1056,8 @@ explain (costs off)
select count(*) from tenk1 t
where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0)
and thousand = 1;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------
Aggregate
-> Gather Motion 3:1 (slice1; segments: 3)
-> GroupAggregate
@@ -1727,6 +1796,159 @@ fetch backward all in c1;
ERROR: backward scan is not supported in this version of Apache Cloudberry
commit;
--end_ignore
+--
+-- Verify that we correctly flatten cases involving a subquery output
+-- expression that doesn't need to be wrapped in a PlaceHolderVar
+--
+explain (costs off)
+select tname, attname from (
+select relname::information_schema.sql_identifier as tname, * from
+ (select * from pg_class c) ss1) ss2
+ right join pg_attribute a on a.attrelid = ss2.oid
+where tname = 'tenk1' and attnum = 1;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Hash Join
+ Hash Cond: (a.attrelid = c.oid)
+ -> Seq Scan on pg_attribute a
+ Filter: (attnum = 1)
+ -> Hash
+ -> Index Scan using pg_class_relname_nsp_index on pg_class c
+ Index Cond: (relname = 'tenk1'::name)
+(8 rows)
+
+select tname, attname from (
+select relname::information_schema.sql_identifier as tname, * from
+ (select * from pg_class c) ss1) ss2
+ right join pg_attribute a on a.attrelid = ss2.oid
+where tname = 'tenk1' and attnum = 1;
+ tname | attname
+-------+---------
+ tenk1 | unique1
+(1 row)
+
+-- Check behavior when there's a lateral reference in the output expression
+explain (verbose, costs off)
+select t1.ten, sum(x) from
+ tenk1 t1 left join lateral (
+ select t1.ten + t2.ten as x, t2.fivethous from tenk1 t2
+ ) ss on t1.unique1 = ss.fivethous
+group by t1.ten
+order by t1.ten;
+ QUERY PLAN
+----------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Output: t1.ten, (sum((t1.ten + t2.ten)))
+ Merge Key: t1.ten
+ -> Sort
+ Output: t1.ten, (sum((t1.ten + t2.ten)))
+ Sort Key: t1.ten
+ -> Finalize HashAggregate
+ Output: t1.ten, sum((t1.ten + t2.ten))
+ Group Key: t1.ten
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Output: t1.ten, (PARTIAL sum((t1.ten + t2.ten)))
+ Hash Key: t1.ten
+ -> Partial HashAggregate
+ Output: t1.ten, PARTIAL sum((t1.ten + t2.ten))
+ Group Key: t1.ten
+ -> Hash Right Join
+ Output: t1.ten, t2.ten
+ Hash Cond: (t2.fivethous = t1.unique1)
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Output: t2.ten, t2.fivethous
+ Hash Key: t2.fivethous
+ -> Seq Scan on public.tenk1 t2
+ Output: t2.ten, t2.fivethous
+ -> Hash
+ Output: t1.ten, t1.unique1
+ -> Seq Scan on public.tenk1 t1
+ Output: t1.ten, t1.unique1
+(29 rows)
+
+select t1.ten, sum(x) from
+ tenk1 t1 left join lateral (
+ select t1.ten + t2.ten as x, t2.fivethous from tenk1 t2
+ ) ss on t1.unique1 = ss.fivethous
+group by t1.ten
+order by t1.ten;
+ ten | sum
+-----+-------
+ 0 | 0
+ 1 | 2000
+ 2 | 4000
+ 3 | 6000
+ 4 | 8000
+ 5 | 10000
+ 6 | 12000
+ 7 | 14000
+ 8 | 16000
+ 9 | 18000
+(10 rows)
+
+explain (verbose, costs off)
+select t1.q1, x from
+ int8_tbl t1 left join
+ (int8_tbl t2 left join
+ lateral (select t2.q1+t3.q1 as x, * from int8_tbl t3) t3 on t2.q2 = t3.q2)
+ on t1.q2 = t2.q2
+order by 1, 2;
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ Output: t1.q1, ((t2.q1 + t3.q1))
+ Merge Key: t1.q1, ((t2.q1 + t3.q1))
+ -> Sort
+ Output: t1.q1, ((t2.q1 + t3.q1))
+ Sort Key: t1.q1, ((t2.q1 + t3.q1))
+ -> Hash Right Join
+ Output: t1.q1, (t2.q1 + t3.q1)
+ Hash Cond: (t2.q2 = t1.q2)
+ -> Hash Left Join
+ Output: t2.q1, t2.q2, t3.q1
+ Hash Cond: (t2.q2 = t3.q2)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Output: t2.q1, t2.q2
+ Hash Key: t2.q2
+ -> Seq Scan on public.int8_tbl t2
+ Output: t2.q1, t2.q2
+ -> Hash
+ Output: t3.q1, t3.q2
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Output: t3.q1, t3.q2
+ Hash Key: t3.q2
+ -> Seq Scan on public.int8_tbl t3
+ Output: t3.q1, t3.q2
+ -> Hash
+ Output: t1.q1, t1.q2
+ -> Redistribute Motion 3:3 (slice4; segments: 3)
+ Output: t1.q1, t1.q2
+ Hash Key: t1.q2
+ -> Seq Scan on public.int8_tbl t1
+ Output: t1.q1, t1.q2
+(33 rows)
+
+select t1.q1, x from
+ int8_tbl t1 left join
+ (int8_tbl t2 left join
+ lateral (select t2.q1+t3.q1 as x, * from int8_tbl t3) t3 on t2.q2 = t3.q2)
+ on t1.q2 = t2.q2
+order by 1, 2;
+ q1 | x
+------------------+------------------
+ 123 | 246
+ 123 | 246
+ 123 | 4567890123456912
+ 123 | 4567890123456912
+ 123 | 9135780246913578
+ 4567890123456789 | 246
+ 4567890123456789 | 4567890123456912
+ 4567890123456789 | 4567890123456912
+ 4567890123456789 | 9135780246913578
+ 4567890123456789 | 9135780246913578
+ 4567890123456789 | 9135780246913578
+(11 rows)
+
--
-- Tests for CTE inlining behavior
--
diff --git a/src/test/regress/expected/table_functions_optimizer.out b/src/test/regress/expected/table_functions_optimizer.out
index 6e189071762..a3e855d8988 100644
--- a/src/test/regress/expected/table_functions_optimizer.out
+++ b/src/test/regress/expected/table_functions_optimizer.out
@@ -40,37 +40,39 @@ CREATE FUNCTION scalar_tf_5(IN a int) RETURNS SETOF RECORD
CREATE FUNCTION scalar_tf_6(IN a anyelement) RETURNS SETOF example
AS $$ SELECT a+$1, b from example $$ LANGUAGE SQL READS SQL DATA;
/* CREATE some multiset input table functions */
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set regress_dll :abs_builddir '/regress.so'
/* scalar value outputs */
CREATE FUNCTION multiset_scalar_null(anytable) RETURNS int
- AS '@abs_srcdir@/regress.so', 'multiset_scalar_null' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_scalar_null' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_scalar_value(anytable) RETURNS int
- AS '@abs_srcdir@/regress.so', 'multiset_scalar_value' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_scalar_value' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_scalar_tuple(anytable) RETURNS example
- AS '@abs_srcdir@/regress.so', 'multiset_scalar_tuple' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_scalar_tuple' LANGUAGE C READS SQL DATA;
/* set value outputs */
CREATE FUNCTION multiset_setof_null(anytable) RETURNS setof int
- AS '@abs_srcdir@/regress.so', 'multiset_setof_null' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_setof_null' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_setof_value(anytable) RETURNS setof int
- AS '@abs_srcdir@/regress.so', 'multiset_setof_value' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_setof_value' LANGUAGE C READS SQL DATA;
/* Bunches of different ways of saying "returns a setof rows */
CREATE FUNCTION multiset_materialize_good(anytable)
RETURNS TABLE(a int, b text)
- AS '@abs_srcdir@/regress.so', 'multiset_materialize_good' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_materialize_good' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_materialize_bad(anytable)
RETURNS TABLE(a int, b text)
- AS '@abs_srcdir@/regress.so', 'multiset_materialize_bad' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_materialize_bad' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_1(a anytable) RETURNS TABLE(a int, b text)
- AS '@abs_srcdir@/regress.so', 'multiset_example' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_example' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_2(a anytable) RETURNS TABLE(a int, b text)
- AS '@abs_srcdir@/regress.so', 'multiset_example' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_example' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_3(a anytable, out a int, out b text) RETURNS SETOF RECORD
- AS '@abs_srcdir@/regress.so', 'multiset_example' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_example' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_4(a anytable) RETURNS SETOF RECORD
- AS '@abs_srcdir@/regress.so', 'multiset_example' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_example' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_5(a anytable) RETURNS SETOF example
- AS '@abs_srcdir@/regress.so', 'multiset_example' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_example' LANGUAGE C READS SQL DATA;
CREATE FUNCTION multiset_6(a anytable) RETURNS SETOF record
- AS '@abs_srcdir@/regress.so', 'multiset_example' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_example' LANGUAGE C READS SQL DATA;
/* Negative test cases around CREATE FUNCTION */
/* ERROR: TABLE output not allowed with OUT parameters */
CREATE FUNCTION error(OUT a int) RETURNS TABLE(a int, b text)
@@ -102,7 +104,7 @@ CREATE FUNCTION error() RETURNS TABLE(a setof example)
ERROR: functions cannot accept set arguments
/* ERROR: anytable cannot have default value */
CREATE FUNCTION error(a anytable DEFAULT TABLE(select 1,'test')) RETURNS TABLE(a int, b text)
- AS '@abs_srcdir@/regress.so', 'multiset_example' LANGUAGE C;
+ AS :'regress_dll', 'multiset_example' LANGUAGE C;
ERROR: anytable parameter cannot have default value
/* Negative test cases around the "anytable" type */
CREATE TABLE fail(x anytable);
@@ -1366,7 +1368,7 @@ LINE 1: SELECT * from nameres(TABLE(SELECT 5));
^
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
CREATE FUNCTION nameres(anytable) RETURNS int
- AS '@abs_srcdir@/regress.so', 'multiset_scalar_value' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_scalar_value' LANGUAGE C READS SQL DATA;
SELECT * from nameres(5); -- should work
nameres
---------
@@ -2247,8 +2249,8 @@ SELECT * FROM v1 order by a, b;
a | integer | | | | plain |
b | text | | | | extended |
View definition:
- SELECT tf.a,
- tf.b
+ SELECT a,
+ b
FROM multiset_2(TABLE( SELECT example.a,
example.b
FROM example)) tf;
@@ -2256,8 +2258,8 @@ View definition:
SELECT pg_get_viewdef('v1'::regclass);
pg_get_viewdef
---------------------------------------------
- SELECT tf.a, +
- tf.b +
+ SELECT a, +
+ b +
FROM multiset_2(TABLE( SELECT example.a,+
example.b +
FROM example)) tf;
@@ -2287,8 +2289,8 @@ SELECT * FROM v2 order by a, b;
a | integer | | | | plain |
b | text | | | | extended |
View definition:
- SELECT tf.a,
- tf.b
+ SELECT a,
+ b
FROM multiset_2(TABLE( SELECT example.a,
example.b
FROM example
@@ -2297,8 +2299,8 @@ View definition:
SELECT pg_get_viewdef('v2'::regclass);
pg_get_viewdef
--------------------------------------------------------
- SELECT tf.a, +
- tf.b +
+ SELECT a, +
+ b +
FROM multiset_2(TABLE( SELECT example.a, +
example.b +
FROM example +
@@ -2329,8 +2331,8 @@ SELECT * FROM v3 order by a, b;
a | integer | | | | plain |
b | text | | | | extended |
View definition:
- SELECT tf.a,
- tf.b
+ SELECT a,
+ b
FROM multiset_2(TABLE( SELECT example.a,
example.b
FROM example
@@ -2339,8 +2341,8 @@ View definition:
SELECT pg_get_viewdef('v3'::regclass);
pg_get_viewdef
---------------------------------------------
- SELECT tf.a, +
- tf.b +
+ SELECT a, +
+ b +
FROM multiset_2(TABLE( SELECT example.a,+
example.b +
FROM example +
@@ -2354,7 +2356,7 @@ DROP view v3;
-- Interaction with set returning functions
-- ========================================
CREATE FUNCTION multi_args(a anytable, x int) RETURNS SETOF example
- AS '@abs_srcdir@/regress.so', 'multiset_example' LANGUAGE C READS SQL DATA;
+ AS :'regress_dll', 'multiset_example' LANGUAGE C READS SQL DATA;
-- In select list requires some extra setrefs logic in planning
SELECT *, generate_series(1,2) FROM multi_args( TABLE(SELECT 1::int, 'hello'::text), 2);
a | b | generate_series
@@ -2449,11 +2451,11 @@ ERROR: PL/Python functions cannot accept type anytable
-- Create a function and a describe method
CREATE FUNCTION sessionize_describe(internal)
RETURNS internal
- AS '@abs_srcdir@/regress.so', 'describe'
+ AS :'regress_dll', 'describe'
LANGUAGE C READS SQL DATA;
CREATE FUNCTION sessionize(anytable, interval)
RETURNS setof record
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA;
-- No dependency yet, all three queries should return 0 rows
SELECT * FROM pg_proc_callback where profnoid = 'sessionize'::regproc;
@@ -2476,19 +2478,19 @@ DROP FUNCTION sessionize_describe(internal);
-- Should fail, no such function
CREATE OR REPLACE FUNCTION sessionize(anytable, interval)
RETURNS setof record
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA
WITH (describe = sessionize_describe);
ERROR: function sessionize_describe(internal) does not exist
-- Recreate describe function
CREATE OR REPLACE FUNCTION sessionize_describe(internal)
RETURNS internal
- AS '@abs_srcdir@/regress.so', 'describe'
+ AS :'regress_dll', 'describe'
LANGUAGE C READS SQL DATA;
-- Alter the existing function to add the describe callback
CREATE OR REPLACE FUNCTION sessionize(anytable, interval)
RETURNS setof record
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA
WITH (describe = sessionize_describe);
-- Observe the relationship now recorded in pg_proc_callback
@@ -2529,11 +2531,11 @@ SELECT * FROM pg_proc_callback where procallback not in (select oid from pg_proc
-- Recreate both functions
CREATE OR REPLACE FUNCTION sessionize_describe(internal)
RETURNS internal
- AS '@abs_srcdir@/regress.so', 'describe'
+ AS :'regress_dll', 'describe'
LANGUAGE C READS SQL DATA;
CREATE OR REPLACE FUNCTION sessionize(anytable, interval)
RETURNS setof record
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA
WITH (describe = sessionize_describe);
-- Check the dependency again, drop should fail
@@ -2550,7 +2552,7 @@ SELECT * FROM pg_proc_callback where profnoid = 'sessionize'::regproc;
-- Alter existing function to remove the describe callback
CREATE OR REPLACE FUNCTION sessionize(anytable, interval)
RETURNS setof record
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA;
-- Check the dependency again, drop should succeed
DROP FUNCTION sessionize_describe(internal);
@@ -2574,19 +2576,19 @@ SELECT * FROM pg_proc_callback where procallback not in (select oid from pg_proc
-- One more time, creating without using "OR REPLACE"
CREATE FUNCTION sessionize_describe(internal)
RETURNS internal
- AS '@abs_srcdir@/regress.so', 'describe'
+ AS :'regress_dll', 'describe'
LANGUAGE C READS SQL DATA;
CREATE FUNCTION sessionize_plain(anytable, interval)
RETURNS setof record
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA;
CREATE FUNCTION sessionize_static(anytable, interval)
RETURNS TABLE(id integer, "time" timestamp, sessionnum integer)
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA;
CREATE FUNCTION sessionize_dynamic(anytable, interval)
RETURNS setof record
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA
WITH (describe = sessionize_describe);
-- Check catalog for the new functions, should only see sessionize_describe
@@ -2871,7 +2873,7 @@ FROM sessionize_plain(
'1 minute' ) as sessionize(id integer, "time" timestamp, sessionnum integer);
CREATE OR REPLACE FUNCTION sessionize_plain(anytable, interval)
RETURNS setof record
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA
WITH (describe = sessionize_describe); -- ERROR: views exist
ERROR: cannot add DESCRIBE callback to function used in view(s)
@@ -2881,11 +2883,11 @@ DROP VIEW supported;
-- ========================
CREATE FUNCTION project_describe(internal)
RETURNS internal
- AS '@abs_srcdir@/regress.so', 'project_describe'
+ AS :'regress_dll', 'project_describe'
LANGUAGE C READS SQL DATA;
CREATE FUNCTION project(anytable, integer)
RETURNS setof record
- AS '@abs_srcdir@/regress.so', 'project'
+ AS :'regress_dll', 'project'
LANGUAGE C READS SQL DATA
WITH (describe = project_describe);
SELECT * FROM project( TABLE( SELECT * FROM history ), 1) order by 1;
@@ -3056,10 +3058,10 @@ LINE 1: ... project( TABLE( SELECT * FROM pg_am ), (ROW(1, '')::example...
-- User data exmaple
-- ========================
CREATE FUNCTION ud_describe(internal) RETURNS internal
- AS '@abs_builddir@/regress@DLSUFFIX@', 'userdata_describe'
+ AS :'regress_dll', 'userdata_describe'
LANGUAGE C READS SQL DATA;
CREATE FUNCTION ud_project(anytable) RETURNS setof RECORD
- AS '@abs_builddir@/regress@DLSUFFIX@', 'userdata_project'
+ AS :'regress_dll', 'userdata_project'
LANGUAGE C READS SQL DATA
WITH (describe = ud_describe);
SELECT * FROM ud_project( TABLE( SELECT * FROM history ) );
@@ -3089,7 +3091,7 @@ SELECT * FROM ud_project( TABLE( SELECT * FROM history ) );
-- Passing input without modification
-- ========================
CREATE FUNCTION noop_project(anytable) RETURNS setof RECORD
- AS '@abs_builddir@/regress@DLSUFFIX@'
+ AS :'regress_dll'
LANGUAGE C READS SQL DATA;
SELECT * FROM noop_project( TABLE( SELECT * FROM history ) ) AS s (id integer, time timestamp);
id | time
@@ -3160,25 +3162,25 @@ SELECT * FROM noop_project( TABLE( SELECT count(*) FROM history GROUP BY time SC
-- ========================
-- explicit return type not suitable for dynamic type resolution
CREATE FUNCTION x() returns int
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA
WITH (describe = sessionize_describe);
ERROR: DESCRIBE only supported for functions returning "record"
-- explicit return type (setof) not suitable for dynamic type resolution
CREATE FUNCTION x() returns setof int
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA
WITH (describe = sessionize_describe);
ERROR: DESCRIBE only supported for functions returning "record"
-- explicit return type (TABLE) not suitable for dynamic type resolution
CREATE FUNCTION x() returns TABLE(id integer, "time" timestamp, sessionnum integer)
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA
WITH (describe = sessionize_describe);
ERROR: DESCRIBE is not supported for functions that return TABLE
-- explicit return type (OUT PARAMS) not suitable for dynamic type resolution
CREATE FUNCTION x(OUT id integer, OUT "time" timestamp, OUT sessionnum integer)
- AS '@abs_srcdir@/regress.so', 'sessionize'
+ AS :'regress_dll', 'sessionize'
LANGUAGE C READS SQL DATA
WITH (describe = sessionize_describe);
ERROR: DESCRIBE is not supported for functions with OUT parameters
diff --git a/src/test/regress/expected/tsearch_optimizer.out b/src/test/regress/expected/tsearch_optimizer.out
index 1381b7c5ca2..d3661630f19 100644
--- a/src/test/regress/expected/tsearch_optimizer.out
+++ b/src/test/regress/expected/tsearch_optimizer.out
@@ -1,4 +1,6 @@
set optimizer_print_missing_stats = off;
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
--
-- Sanity checks for text search catalogs
--
@@ -56,6 +58,14 @@ WHERE
-------+-------+--------+--------------+----------+---------
(0 rows)
+-- Load some test data
+CREATE TABLE test_tsvector(
+ t text,
+ a tsvector
+);
+\set filename :abs_srcdir '/data/tsearch.data'
+COPY test_tsvector FROM :'filename';
+ANALYZE test_tsvector;
-- test basic text search behavior without indexes, then with
SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
count
@@ -1091,6 +1101,7 @@ SELECT * FROM ts_stat('SELECT a FROM test_tsvector', 'AB') ORDER BY ndoc DESC, n
DFG | 1 | 2
(1 row)
+DROP INDEX wowidx;
--dictionaries and to_tsvector
SELECT ts_lexize('english_stem', 'skies');
ts_lexize
@@ -1820,13 +1831,131 @@ Water, water, every where
Water, water, every where,
Nor any drop to drink.
S. T. Coleridge (1772-1834)
-', phraseto_tsquery('english', 'painted Ocean'));
- ts_headline
----------------------------------------
- painted Ship +
- Upon a painted Ocean.+
- Water, water, every where +
+', to_tsquery('english', 'day & drink'));
+ ts_headline
+------------------------------------
+ And all the boards did shrink; +
+ Nor any drop
+ Upon a painted Ocean. +
+ We stuck, nor breath nor motion,+
+ day, +
+ As idle as a painted Ship +
+ Water, water, every where +
+ Water, water, every where, +
+(1 row)
+
+SELECT ts_headline('english', '
+Day after day, day after day,
+ We stuck, nor breath nor motion,
+As idle as a painted Ship
+ Upon a painted Ocean.
+Water, water, every where
+ And all the boards did shrink;
+Water, water, every where,
+ Nor any drop to drink.
+S. T. Coleridge (1772-1834)
+', to_tsquery('english', 'day | drink'));
+ ts_headline
+-----------------------------------------------------------
+ We stuck, nor breath nor motion, +
+ Day after day, day after day,+
+ As idle as a painted
+(1 row)
+
+SELECT ts_headline('english', '
+Day after day, day after day,
+ We stuck, nor breath nor motion,
+As idle as a painted Ship
+ Upon a painted Ocean.
+Water, water, every where
+ And all the boards did shrink;
+Water, water, every where,
+ Nor any drop to drink.
+S. T. Coleridge (1772-1834)
+', to_tsquery('english', 'day | !drink'));
+ ts_headline
+-----------------------------------------------------------
+ We stuck, nor breath nor motion, +
+ Day after day, day after day,+
+ As idle as a painted
+(1 row)
+
+SELECT ts_headline('english', '
+Day after day, day after day,
+ We stuck, nor breath nor motion,
+As idle as a painted Ship
+ Upon a painted Ocean.
+Water, water, every where
+ And all the boards did shrink;
+Water, water, every where,
+ Nor any drop to drink.
+S. T. Coleridge (1772-1834)
+', to_tsquery('english', 'painted <-> Ship & drink'));
+ ts_headline
+----------------------------------
+ And all the boards did shrink;+
+ Nor any drop to drink
+ Upon a painted Ocean. +
+ painted Ship +
+ Water, water, every where +
+ Water, water, every where, +
+(1 row)
+
+SELECT ts_headline('english', '
+Day after day, day after day,
+ We stuck, nor breath nor motion,
+As idle as a painted Ship
+ Upon a painted Ocean.
+Water, water, every where
+ And all the boards did shrink;
+Water, water, every where,
+ Nor any drop to drink.
+S. T. Coleridge (1772-1834)
+', to_tsquery('english', 'painted <-> Ship | drink'));
+ ts_headline
+---------------------------------
+ And all the boards did shrink
+ Upon a painted Ocean. +
+ painted Ship +
+ Water, water, every where +
+(1 row)
+
+SELECT ts_headline('english', '
+Day after day, day after day,
+ We stuck, nor breath nor motion,
+As idle as a painted Ship
+ Upon a painted Ocean.
+Water, water, every where
+ And all the boards did shrink;
+Water, water, every where,
+ Nor any drop to drink.
+S. T. Coleridge (1772-1834)
+', to_tsquery('english', 'painted <-> Ship | !drink'));
+ ts_headline
+---------------------------------
And all the boards did shrink
+ Upon a painted Ocean. +
+ painted Ship +
+ Water, water, every where +
+(1 row)
+
+SELECT ts_headline('english', '
+Day after day, day after day,
+ We stuck, nor breath nor motion,
+As idle as a painted Ship
+ Upon a painted Ocean.
+Water, water, every where
+ And all the boards did shrink;
+Water, water, every where,
+ Nor any drop to drink.
+S. T. Coleridge (1772-1834)
+', phraseto_tsquery('english', 'painted Ocean'));
+ ts_headline
+----------------------------------
+ And all the boards did shrink;+
+ painted Ocean. +
+ Water, water, every
+ Water, water, every where +
(1 row)
SELECT ts_headline('english', '
@@ -1857,6 +1986,15 @@ to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'),
Lorem ipsum urna. Nullam nullam ullamcorper urna
(1 row)
+SELECT ts_headline('english',
+'Lorem ipsum urna. Nullam nullam ullamcorper urna.',
+phraseto_tsquery('english','ullamcorper urna'),
+'MaxWords=100, MinWords=5');
+ ts_headline
+-------------------------------------------------------------
+ urna. Nullam nullam ullamcorper urna.
+(1 row)
+
SELECT ts_headline('english', '
@@ -1899,9 +2037,9 @@ SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 & 3', 'MaxWords=4, MinWords=1
(1 row)
SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=4, MinWords=1');
- ts_headline
-----------------------------
- 3 1 3
+ ts_headline
+-------------------
+ 1 3
(1 row)
--Check if headline fragments work
@@ -2006,6 +2144,23 @@ to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'),
Lorem ipsum urna. Nullam nullam ullamcorper urna
(1 row)
+-- Edge cases with empty query
+SELECT ts_headline('english',
+'', to_tsquery('english', ''));
+NOTICE: text-search query doesn't contain lexemes: ""
+ ts_headline
+-------------
+
+(1 row)
+
+SELECT ts_headline('english',
+'foo bar', to_tsquery('english', ''));
+NOTICE: text-search query doesn't contain lexemes: ""
+ ts_headline
+-------------
+ foo bar
+(1 row)
+
--Rewrite sub system
CREATE TABLE test_tsquery (txtkeyword TEXT, txtsample TEXT);
\set ECHO none
@@ -2550,12 +2705,19 @@ select websearch_to_tsquery('simple', 'abc <-> def');
'abc' & 'def'
(1 row)
+-- parens are ignored, too
select websearch_to_tsquery('simple', 'abc (pg or class)');
websearch_to_tsquery
------------------------
'abc' & 'pg' | 'class'
(1 row)
+select websearch_to_tsquery('simple', '(foo bar) or (ding dong)');
+ websearch_to_tsquery
+---------------------------------
+ 'foo' & 'bar' | 'ding' & 'dong'
+(1 row)
+
-- NOT is ignored in quotes
select websearch_to_tsquery('english', 'My brand new smartphone');
websearch_to_tsquery
diff --git a/src/test/regress/expected/tuplesort_optimizer.out b/src/test/regress/expected/tuplesort_optimizer.out
index d62e0a0e71b..bda2821f75f 100644
--- a/src/test/regress/expected/tuplesort_optimizer.out
+++ b/src/test/regress/expected/tuplesort_optimizer.out
@@ -542,20 +542,22 @@ EXPLAIN (COSTS OFF) :qry;
-> GroupAggregate
Group Key: a.col12
Filter: (count(*) > 1)
- -> Merge Join
- Merge Cond: (a.col12 = b.col12)
- -> Sort
- Sort Key: a.col12 DESC
- -> Redistribute Motion 3:3 (slice2; segments: 3)
- Hash Key: a.col12
- -> Seq Scan on test_mark_restore a
- -> Sort
- Sort Key: b.col12 DESC
- -> Redistribute Motion 3:3 (slice3; segments: 3)
- Hash Key: b.col12
- -> Seq Scan on test_mark_restore b
+ -> Sort
+ Sort Key: a.col12 DESC, a.col1
+ -> Merge Join
+ Merge Cond: (a.col12 = b.col12)
+ -> Sort
+ Sort Key: a.col12 DESC
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: a.col12
+ -> Seq Scan on test_mark_restore a
+ -> Sort
+ Sort Key: b.col12 DESC
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: b.col12
+ -> Seq Scan on test_mark_restore b
Optimizer: Postgres query optimizer
-(22 rows)
+(24 rows)
:qry;
col12 | count | count | count | count | count
@@ -586,20 +588,22 @@ EXPLAIN (COSTS OFF) :qry;
-> GroupAggregate
Group Key: a.col12
Filter: (count(*) > 1)
- -> Merge Join
- Merge Cond: (a.col12 = b.col12)
- -> Sort
- Sort Key: a.col12 DESC
- -> Redistribute Motion 3:3 (slice2; segments: 3)
- Hash Key: a.col12
- -> Seq Scan on test_mark_restore a
- -> Sort
- Sort Key: b.col12 DESC
- -> Redistribute Motion 3:3 (slice3; segments: 3)
- Hash Key: b.col12
- -> Seq Scan on test_mark_restore b
+ -> Sort
+ Sort Key: a.col12 DESC, a.col1
+ -> Merge Join
+ Merge Cond: (a.col12 = b.col12)
+ -> Sort
+ Sort Key: a.col12 DESC
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: a.col12
+ -> Seq Scan on test_mark_restore a
+ -> Sort
+ Sort Key: b.col12 DESC
+ -> Redistribute Motion 3:3 (slice3; segments: 3)
+ Hash Key: b.col12
+ -> Seq Scan on test_mark_restore b
Optimizer: Postgres query optimizer
-(22 rows)
+(24 rows)
:qry;
col12 | count | count | count | count | count
diff --git a/src/test/regress/expected/union_optimizer.out b/src/test/regress/expected/union_optimizer.out
index 551709ba271..70d6901c4e1 100644
--- a/src/test/regress/expected/union_optimizer.out
+++ b/src/test/regress/expected/union_optimizer.out
@@ -1011,7 +1011,7 @@ SELECT q1 FROM int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1;
ERROR: column "q2" does not exist
LINE 1: ... int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1...
^
-HINT: There is a column named "q2" in table "*SELECT* 2", but it cannot be referenced from this part of the query.
+DETAIL: There is a column named "q2" in table "*SELECT* 2", but it cannot be referenced from this part of the query.
-- But this should work:
SELECT q1 FROM int8_tbl EXCEPT (((SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1))) ORDER BY 1;
q1
diff --git a/src/test/regress/expected/updatable_views_optimizer.out b/src/test/regress/expected/updatable_views_optimizer.out
index c1b53c4728e..6299b018b00 100644
--- a/src/test/regress/expected/updatable_views_optimizer.out
+++ b/src/test/regress/expected/updatable_views_optimizer.out
@@ -433,7 +433,7 @@ EXPLAIN (costs off) UPDATE rw_view1 SET a=6 WHERE a=5;
Sort Key: (DMLAction)
-> Redistribute Motion 3:3 (slice1; segments: 3)
Hash Key: a
- -> Split
+ -> Split Update
-> Index Scan using base_tbl_pkey on base_tbl
Index Cond: ((a = 5) AND (a > 0))
Optimizer: Pivotal Optimizer (GPORCA)
@@ -536,7 +536,7 @@ EXPLAIN (costs off) UPDATE rw_view2 SET aaa=5 WHERE aaa=4;
Sort Key: (DMLAction)
-> Redistribute Motion 3:3 (slice1; segments: 3)
Hash Key: a
- -> Split
+ -> Split Update
-> Index Scan using base_tbl_pkey on base_tbl
Index Cond: ((a = 4) AND (a < 10) AND (a > 0))
Optimizer: Pivotal Optimizer (GPORCA)
@@ -1014,6 +1014,7 @@ drop cascades to function rw_view1_aa(rw_view1)
-- permissions checks
CREATE USER regress_view_user1;
CREATE USER regress_view_user2;
+CREATE USER regress_view_user3;
SET SESSION AUTHORIZATION regress_view_user1;
CREATE TABLE base_tbl(a int, b text, c float);
INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
@@ -1240,8 +1241,244 @@ DROP TABLE base_tbl CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to view rw_view1
drop cascades to view rw_view2
+-- security invoker view permissions
+SET SESSION AUTHORIZATION regress_view_user1;
+CREATE TABLE base_tbl(a int, b text, c float);
+INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
+CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl;
+ALTER VIEW rw_view1 SET (security_invoker = true);
+INSERT INTO rw_view1 VALUES ('Row 2', 2.0, 2);
+GRANT SELECT ON rw_view1 TO regress_view_user2;
+GRANT UPDATE (bb,cc) ON rw_view1 TO regress_view_user2;
+SET SESSION AUTHORIZATION regress_view_user2;
+SELECT * FROM base_tbl; -- not allowed
+ERROR: permission denied for table base_tbl
+SELECT * FROM rw_view1; -- not allowed
+ERROR: permission denied for table base_tbl
+INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- not allowed
+ERROR: permission denied for table base_tbl
+INSERT INTO rw_view1 VALUES ('Row 3', 3.0, 3); -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE base_tbl SET a=a; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view1 SET bb=bb, cc=cc; -- not allowed
+ERROR: permission denied for table base_tbl
+DELETE FROM base_tbl; -- not allowed
+ERROR: permission denied for table base_tbl
+DELETE FROM rw_view1; -- not allowed
+ERROR: permission denied for view rw_view1
+SET SESSION AUTHORIZATION regress_view_user1;
+GRANT SELECT ON base_tbl TO regress_view_user2;
+GRANT UPDATE (a,c) ON base_tbl TO regress_view_user2;
+SET SESSION AUTHORIZATION regress_view_user2;
+SELECT * FROM base_tbl; -- ok
+ a | b | c
+---+-------+---
+ 1 | Row 1 | 1
+ 2 | Row 2 | 2
+(2 rows)
+
+SELECT * FROM rw_view1; -- ok
+ bb | cc | aa
+-------+----+----
+ Row 1 | 1 | 1
+ Row 2 | 2 | 2
+(2 rows)
+
+UPDATE base_tbl SET a=a, c=c; -- ok
+UPDATE base_tbl SET b=b; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view1 SET cc=cc; -- ok
+UPDATE rw_view1 SET aa=aa; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view1 SET bb=bb; -- not allowed
+ERROR: permission denied for table base_tbl
+SET SESSION AUTHORIZATION regress_view_user1;
+GRANT INSERT, DELETE ON base_tbl TO regress_view_user2;
+SET SESSION AUTHORIZATION regress_view_user2;
+INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- ok
+INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- not allowed
+ERROR: permission denied for view rw_view1
+DELETE FROM base_tbl WHERE a=1; -- ok
+DELETE FROM rw_view1 WHERE aa=2; -- not allowed
+ERROR: permission denied for view rw_view1
+SET SESSION AUTHORIZATION regress_view_user1;
+REVOKE INSERT, DELETE ON base_tbl FROM regress_view_user2;
+GRANT INSERT, DELETE ON rw_view1 TO regress_view_user2;
+SET SESSION AUTHORIZATION regress_view_user2;
+INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- not allowed
+ERROR: permission denied for table base_tbl
+DELETE FROM rw_view1 WHERE aa=2; -- not allowed
+ERROR: permission denied for table base_tbl
+SET SESSION AUTHORIZATION regress_view_user1;
+GRANT INSERT, DELETE ON base_tbl TO regress_view_user2;
+SET SESSION AUTHORIZATION regress_view_user2;
+INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- ok
+DELETE FROM rw_view1 WHERE aa=2; -- ok
+SELECT * FROM base_tbl; -- ok
+ a | b | c
+---+-------+---
+ 3 | Row 3 | 3
+ 4 | Row 4 | 4
+(2 rows)
+
+RESET SESSION AUTHORIZATION;
+DROP TABLE base_tbl CASCADE;
+NOTICE: drop cascades to view rw_view1
+-- ordinary view on top of security invoker view permissions
+CREATE TABLE base_tbl(a int, b text, c float);
+INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
+SET SESSION AUTHORIZATION regress_view_user1;
+CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl;
+ALTER VIEW rw_view1 SET (security_invoker = true);
+SELECT * FROM rw_view1; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view1 SET aa=aa; -- not allowed
+ERROR: permission denied for table base_tbl
+SET SESSION AUTHORIZATION regress_view_user2;
+CREATE VIEW rw_view2 AS SELECT cc AS ccc, aa AS aaa, bb AS bbb FROM rw_view1;
+GRANT SELECT, UPDATE ON rw_view2 TO regress_view_user3;
+SELECT * FROM rw_view2; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+RESET SESSION AUTHORIZATION;
+GRANT SELECT ON base_tbl TO regress_view_user1;
+GRANT UPDATE (a, b) ON base_tbl TO regress_view_user1;
+SET SESSION AUTHORIZATION regress_view_user1;
+SELECT * FROM rw_view1; -- ok
+ bb | cc | aa
+-------+----+----
+ Row 1 | 1 | 1
+(1 row)
+
+UPDATE rw_view1 SET aa=aa, bb=bb; -- ok
+UPDATE rw_view1 SET cc=cc; -- not allowed
+ERROR: permission denied for table base_tbl
+SET SESSION AUTHORIZATION regress_view_user2;
+SELECT * FROM rw_view2; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+SET SESSION AUTHORIZATION regress_view_user3;
+SELECT * FROM rw_view2; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+SET SESSION AUTHORIZATION regress_view_user1;
+GRANT SELECT ON rw_view1 TO regress_view_user2;
+GRANT UPDATE (bb, cc) ON rw_view1 TO regress_view_user2;
+SET SESSION AUTHORIZATION regress_view_user2;
+SELECT * FROM rw_view2; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET bbb=bbb; -- not allowed
+ERROR: permission denied for table base_tbl
+SET SESSION AUTHORIZATION regress_view_user3;
+SELECT * FROM rw_view2; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET bbb=bbb; -- not allowed
+ERROR: permission denied for table base_tbl
+RESET SESSION AUTHORIZATION;
+GRANT SELECT ON base_tbl TO regress_view_user2;
+GRANT UPDATE (a, c) ON base_tbl TO regress_view_user2;
+SET SESSION AUTHORIZATION regress_view_user2;
+SELECT * FROM rw_view2; -- ok
+ ccc | aaa | bbb
+-----+-----+-------
+ 1 | 1 | Row 1
+(1 row)
+
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET bbb=bbb; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET ccc=ccc; -- ok
+SET SESSION AUTHORIZATION regress_view_user3;
+SELECT * FROM rw_view2; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET bbb=bbb; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET ccc=ccc; -- not allowed
+ERROR: permission denied for table base_tbl
+RESET SESSION AUTHORIZATION;
+GRANT SELECT ON base_tbl TO regress_view_user3;
+GRANT UPDATE (a, c) ON base_tbl TO regress_view_user3;
+SET SESSION AUTHORIZATION regress_view_user3;
+SELECT * FROM rw_view2; -- ok
+ ccc | aaa | bbb
+-----+-----+-------
+ 1 | 1 | Row 1
+(1 row)
+
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET bbb=bbb; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET ccc=ccc; -- ok
+RESET SESSION AUTHORIZATION;
+REVOKE SELECT, UPDATE ON base_tbl FROM regress_view_user1;
+SET SESSION AUTHORIZATION regress_view_user1;
+SELECT * FROM rw_view1; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view1 SET aa=aa; -- not allowed
+ERROR: permission denied for table base_tbl
+SET SESSION AUTHORIZATION regress_view_user2;
+SELECT * FROM rw_view2; -- ok
+ ccc | aaa | bbb
+-----+-----+-------
+ 1 | 1 | Row 1
+(1 row)
+
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET bbb=bbb; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET ccc=ccc; -- ok
+SET SESSION AUTHORIZATION regress_view_user3;
+SELECT * FROM rw_view2; -- ok
+ ccc | aaa | bbb
+-----+-----+-------
+ 1 | 1 | Row 1
+(1 row)
+
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET bbb=bbb; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET ccc=ccc; -- ok
+RESET SESSION AUTHORIZATION;
+REVOKE SELECT, UPDATE ON base_tbl FROM regress_view_user2;
+SET SESSION AUTHORIZATION regress_view_user2;
+SELECT * FROM rw_view2; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET bbb=bbb; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET ccc=ccc; -- not allowed
+ERROR: permission denied for table base_tbl
+SET SESSION AUTHORIZATION regress_view_user3;
+SELECT * FROM rw_view2; -- ok
+ ccc | aaa | bbb
+-----+-----+-------
+ 1 | 1 | Row 1
+(1 row)
+
+UPDATE rw_view2 SET aaa=aaa; -- not allowed
+ERROR: permission denied for view rw_view1
+UPDATE rw_view2 SET bbb=bbb; -- not allowed
+ERROR: permission denied for table base_tbl
+UPDATE rw_view2 SET ccc=ccc; -- ok
+RESET SESSION AUTHORIZATION;
+DROP TABLE base_tbl CASCADE;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to view rw_view1
+drop cascades to view rw_view2
DROP USER regress_view_user1;
DROP USER regress_view_user2;
+DROP USER regress_view_user3;
-- column defaults
CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified', c serial);
INSERT INTO base_tbl VALUES (1, 'Row 1');
@@ -1381,6 +1618,9 @@ DETAIL: View columns that refer to system columns are not updatable.
INSERT INTO rw_view1 (s, c, a) VALUES (null, null, 1.1); -- should fail
ERROR: cannot insert into column "s" of view "rw_view1"
DETAIL: View columns that are not columns of their base relation are not updatable.
+INSERT INTO rw_view1 (s, c, a) VALUES (default, default, 1.1); -- should fail
+ERROR: cannot insert into column "s" of view "rw_view1"
+DETAIL: View columns that are not columns of their base relation are not updatable.
INSERT INTO rw_view1 (a) VALUES (1.1) RETURNING a, s, c; -- OK
a | s | c
-----+-------------------+-------------------
@@ -1697,19 +1937,19 @@ CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a < b
a | integer | | | | plain |
b | integer | | | | plain |
View definition:
- SELECT base_tbl.a,
- base_tbl.b
+ SELECT a,
+ b
FROM base_tbl
- WHERE base_tbl.a < base_tbl.b;
+ WHERE a < b;
Options: check_option=local
SELECT * FROM information_schema.views WHERE table_name = 'rw_view1';
- table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
----------------+--------------+------------+------------------------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
- regression | public | rw_view1 | SELECT base_tbl.a, +| LOCAL | YES | YES | NO | NO | NO
- | | | base_tbl.b +| | | | | |
- | | | FROM base_tbl +| | | | | |
- | | | WHERE (base_tbl.a < base_tbl.b); | | | | | |
+ table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
+---------------+--------------+------------+------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
+ regression | public | rw_view1 | SELECT a, +| LOCAL | YES | YES | NO | NO | NO
+ | | | b +| | | | | |
+ | | | FROM base_tbl+| | | | | |
+ | | | WHERE (a < b); | | | | | |
(1 row)
INSERT INTO rw_view1 VALUES(3,4); -- ok
@@ -1750,17 +1990,17 @@ CREATE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a < 10
--------+---------+-----------+----------+---------+---------+-------------
a | integer | | | | plain |
View definition:
- SELECT rw_view1.a
+ SELECT a
FROM rw_view1
- WHERE rw_view1.a < 10;
+ WHERE a < 10;
Options: check_option=cascaded
SELECT * FROM information_schema.views WHERE table_name = 'rw_view2';
- table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
----------------+--------------+------------+----------------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
- regression | public | rw_view2 | SELECT rw_view1.a +| CASCADED | YES | YES | NO | NO | NO
- | | | FROM rw_view1 +| | | | | |
- | | | WHERE (rw_view1.a < 10); | | | | | |
+ table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
+---------------+--------------+------------+-------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
+ regression | public | rw_view2 | SELECT a +| CASCADED | YES | YES | NO | NO | NO
+ | | | FROM rw_view1 +| | | | | |
+ | | | WHERE (a < 10); | | | | | |
(1 row)
INSERT INTO rw_view2 VALUES (-5); -- should fail
@@ -1790,17 +2030,17 @@ CREATE OR REPLACE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a < 10
--------+---------+-----------+----------+---------+---------+-------------
a | integer | | | | plain |
View definition:
- SELECT rw_view1.a
+ SELECT a
FROM rw_view1
- WHERE rw_view1.a < 10;
+ WHERE a < 10;
Options: check_option=local
SELECT * FROM information_schema.views WHERE table_name = 'rw_view2';
- table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
----------------+--------------+------------+----------------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
- regression | public | rw_view2 | SELECT rw_view1.a +| LOCAL | YES | YES | NO | NO | NO
- | | | FROM rw_view1 +| | | | | |
- | | | WHERE (rw_view1.a < 10); | | | | | |
+ table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
+---------------+--------------+------------+-------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
+ regression | public | rw_view2 | SELECT a +| LOCAL | YES | YES | NO | NO | NO
+ | | | FROM rw_view1 +| | | | | |
+ | | | WHERE (a < 10); | | | | | |
(1 row)
INSERT INTO rw_view2 VALUES (-10); -- ok, but not in view
@@ -1831,16 +2071,16 @@ ALTER VIEW rw_view2 RESET (check_option);
--------+---------+-----------+----------+---------+---------+-------------
a | integer | | | | plain |
View definition:
- SELECT rw_view1.a
+ SELECT a
FROM rw_view1
- WHERE rw_view1.a < 10;
+ WHERE a < 10;
SELECT * FROM information_schema.views WHERE table_name = 'rw_view2';
- table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
----------------+--------------+------------+----------------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
- regression | public | rw_view2 | SELECT rw_view1.a +| NONE | YES | YES | NO | NO | NO
- | | | FROM rw_view1 +| | | | | |
- | | | WHERE (rw_view1.a < 10); | | | | | |
+ table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
+---------------+--------------+------------+-------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
+ regression | public | rw_view2 | SELECT a +| NONE | YES | YES | NO | NO | NO
+ | | | FROM rw_view1 +| | | | | |
+ | | | WHERE (a < 10); | | | | | |
(1 row)
INSERT INTO rw_view2 VALUES (30); -- ok, but not in view
@@ -1862,15 +2102,15 @@ CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WITH CHECK OPTION;
CREATE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a > 0;
CREATE VIEW rw_view3 AS SELECT * FROM rw_view2 WITH CHECK OPTION;
SELECT * FROM information_schema.views WHERE table_name LIKE E'rw\\_view_' ORDER BY table_name;
- table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
----------------+--------------+------------+---------------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
- regression | public | rw_view1 | SELECT base_tbl.a +| CASCADED | YES | YES | NO | NO | NO
- | | | FROM base_tbl; | | | | | |
- regression | public | rw_view2 | SELECT rw_view1.a +| NONE | YES | YES | NO | NO | NO
- | | | FROM rw_view1 +| | | | | |
- | | | WHERE (rw_view1.a > 0); | | | | | |
- regression | public | rw_view3 | SELECT rw_view2.a +| CASCADED | YES | YES | NO | NO | NO
- | | | FROM rw_view2; | | | | | |
+ table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into
+---------------+--------------+------------+-------------------+--------------+--------------+--------------------+----------------------+----------------------+----------------------------
+ regression | public | rw_view1 | SELECT a +| CASCADED | YES | YES | NO | NO | NO
+ | | | FROM base_tbl; | | | | | |
+ regression | public | rw_view2 | SELECT a +| NONE | YES | YES | NO | NO | NO
+ | | | FROM rw_view1 +| | | | | |
+ | | | WHERE (a > 0); | | | | | |
+ regression | public | rw_view3 | SELECT a +| CASCADED | YES | YES | NO | NO | NO
+ | | | FROM rw_view2; | | | | | |
(3 rows)
INSERT INTO rw_view1 VALUES (-1); -- ok
@@ -1943,7 +2183,7 @@ EXPLAIN (costs off) UPDATE rw_view1 SET a = a + 5;
-----------------------------------------------------------------
Update on base_tbl b
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
- -> Split
+ -> Split Update
-> Nested Loop
Join Filter: (b.a = r.a)
-> Seq Scan on base_tbl b
@@ -2613,6 +2853,40 @@ DROP VIEW v1;
DROP TABLE t2;
DROP TABLE t1;
--
+-- Test sub-select in nested security barrier views, per bug #17972
+--
+CREATE TABLE t1 (a int);
+CREATE VIEW v1 WITH (security_barrier = true) AS
+ SELECT * FROM t1;
+CREATE RULE v1_upd_rule AS ON UPDATE TO v1 DO INSTEAD
+ UPDATE t1 SET a = NEW.a WHERE a = OLD.a;
+CREATE VIEW v2 WITH (security_barrier = true) AS
+ SELECT * FROM v1 WHERE EXISTS (SELECT 1);
+EXPLAIN (COSTS OFF) UPDATE v2 SET a = 1;
+ QUERY PLAN
+------------------------------------------------------------
+ Update on t1
+ InitPlan 1 (returns $0) (slice2)
+ -> Result
+ -> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
+ -> Split Update
+ -> Merge Join
+ Merge Cond: (t1.a = v1.a)
+ -> Sort
+ Sort Key: t1.a
+ -> Seq Scan on t1
+ -> Sort
+ Sort Key: v1.a
+ -> Subquery Scan on v1
+ -> Result
+ One-Time Filter: $0
+ -> Seq Scan on t1 t1_1
+(17 rows)
+
+DROP VIEW v2;
+DROP VIEW v1;
+DROP TABLE t1;
+--
-- Test CREATE OR REPLACE VIEW turning a non-updatable view into an
-- auto-updatable view and adding check options in a single step
--
diff --git a/src/test/regress/expected/update_gp.out b/src/test/regress/expected/update_gp.out
index 2dc6986a891..e797ccac194 100644
--- a/src/test/regress/expected/update_gp.out
+++ b/src/test/regress/expected/update_gp.out
@@ -418,7 +418,7 @@ EXPLAIN (COSTS OFF ) UPDATE tab3 SET C1 = C1 + 1, C5 = C5+1;
---------------------------------------------------------------
Update on tab3
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
- -> Split
+ -> Split Update
-> Seq Scan on tab3
Optimizer: Postgres query optimizer
(5 rows)
@@ -693,7 +693,7 @@ explain update tsplit_entry set c = s.a from (select count(*) as a from gp_segme
------------------------------------------------------------------------------------------------------------------
Update on tsplit_entry (cost=10000000001.00..10000000003.18 rows=3 width=54)
-> Explicit Redistribute Motion 1:3 (slice) (cost=10000000001.00..10000000003.18 rows=7 width=54)
- -> Split (cost=10000000001.00..10000000003.18 rows=7 width=54)
+ -> Split Update (cost=10000000001.00..10000000003.18 rows=7 width=54)
-> Nested Loop (cost=10000000001.00..10000000003.12 rows=4 width=54)
-> Gather Motion 3:1 (slice2; segments: 3) (cost=0.00..2.06 rows=2 width=14)
-> Seq Scan on tsplit_entry (cost=0.00..2.02 rows=1 width=14)
diff --git a/src/test/regress/expected/update_gp_optimizer.out b/src/test/regress/expected/update_gp_optimizer.out
index 44f3ed1e9a1..5e8c50b1393 100644
--- a/src/test/regress/expected/update_gp_optimizer.out
+++ b/src/test/regress/expected/update_gp_optimizer.out
@@ -85,10 +85,10 @@ ERROR: can't split update for inherit table: base_tbl (preptlist.c:138)
Update on child_a base_tbl_1
Update on child_b base_tbl_2
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
- -> Split
+ -> Split Update
-> Seq Scan on base_tbl
-> Explicit Redistribute Motion 3:3 (slice2; segments: 3)
- -> Split
+ -> Split Update
-> Seq Scan on child_a base_tbl_1
-> Seq Scan on child_b base_tbl_2
Optimizer: Postgres query optimizer
@@ -445,7 +445,7 @@ EXPLAIN (COSTS OFF ) UPDATE tab3 SET C1 = C1 + 1, C5 = C5+1;
-> Result
-> Redistribute Motion 3:3 (slice1; segments: 3)
Hash Key: c1, c2, c3
- -> Split
+ -> Split Update
-> Seq Scan on tab3
Optimizer: Pivotal Optimizer (GPORCA)
(9 rows)
@@ -700,7 +700,7 @@ explain update nosplitupdate set a=0 where a=1 and a<1;
One-Time Filter: true
-> Result (cost=0.00..0.00 rows=0 width=22)
-> Result (cost=0.00..0.00 rows=0 width=18)
- -> Split (cost=0.00..0.00 rows=0 width=18)
+ -> Split Update (cost=0.00..0.00 rows=0 width=18)
-> Result (cost=0.00..0.00 rows=0 width=18)
One-Time Filter: false
Optimizer: Pivotal Optimizer (GPORCA)
@@ -723,7 +723,7 @@ explain update tsplit_entry set c = s.a from (select count(*) as a from gp_segme
-> Redistribute Motion 1:3 (slice1) (cost=0.00..882689.42 rows=4 width=22)
Hash Key: tsplit_entry_1.c
-> Result (cost=0.00..882689.42 rows=4 width=22)
- -> Split (cost=0.00..882689.42 rows=4 width=18)
+ -> Split Update (cost=0.00..882689.42 rows=4 width=18)
-> Nested Loop (cost=0.00..882689.42 rows=2 width=22)
Join Filter: true
-> Aggregate (cost=0.00..0.00 rows=1 width=8)
diff --git a/src/test/regress/expected/update_optimizer.out b/src/test/regress/expected/update_optimizer.out
index a3335abbe4d..f49fb127b7c 100755
--- a/src/test/regress/expected/update_optimizer.out
+++ b/src/test/regress/expected/update_optimizer.out
@@ -177,7 +177,7 @@ UPDATE update_test t
Update on public.update_test t
-> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
Output: ($1), ($2), t.c, ((SubPlan 1 (returns $1,$2))), t.ctid, t.gp_segment_id, (DMLAction)
- -> Split
+ -> Split Update
Output: ($1), ($2), t.c, ((SubPlan 1 (returns $1,$2))), t.ctid, t.gp_segment_id, DMLAction
-> Seq Scan on public.update_test t
Output: $1, $2, t.c, (SubPlan 1 (returns $1,$2)), t.ctid, t.gp_segment_id
diff --git a/src/test/regress/expected/with_clause_optimizer.out b/src/test/regress/expected/with_clause_optimizer.out
index 3fbea887ef7..370c7b79684 100644
--- a/src/test/regress/expected/with_clause_optimizer.out
+++ b/src/test/regress/expected/with_clause_optimizer.out
@@ -1029,7 +1029,7 @@ SELECT pg_get_viewdef('my_view'::regclass);
SELECT sum(with_test1.value) AS sum+
FROM with_test1 +
) +
- SELECT my_sum.total +
+ SELECT total +
FROM my_sum;
(1 row)
@@ -1040,7 +1040,7 @@ SELECT pg_get_viewdef('my_view'::regclass, true);
SELECT sum(with_test1.value) AS sum+
FROM with_test1 +
) +
- SELECT my_sum.total +
+ SELECT total +
FROM my_sum;
(1 row)
@@ -1063,7 +1063,7 @@ SELECT pg_get_viewdef('my_view'::regclass);
SELECT sum(my_group_sum.total) AS sum +
FROM my_group_sum +
) +
- SELECT my_sum.total +
+ SELECT total +
FROM my_sum;
(1 row)
@@ -1079,7 +1079,7 @@ SELECT pg_get_viewdef('my_view'::regclass, true);
SELECT sum(my_group_sum.total) AS sum +
FROM my_group_sum +
) +
- SELECT my_sum.total +
+ SELECT total +
FROM my_sum;
(1 row)
diff --git a/src/test/regress/expected/with_optimizer.out b/src/test/regress/expected/with_optimizer.out
index acd06e1f4ba..1f1413ac69f 100644
--- a/src/test/regress/expected/with_optimizer.out
+++ b/src/test/regress/expected/with_optimizer.out
@@ -404,9 +404,9 @@ SELECT pg_get_viewdef('vsubdepartment'::regclass);
subdepartment sd +
WHERE (d.parent_department = sd.id)+
) +
- SELECT subdepartment.id, +
- subdepartment.parent_department, +
- subdepartment.name +
+ SELECT id, +
+ parent_department, +
+ name +
FROM subdepartment;
(1 row)
@@ -427,9 +427,9 @@ SELECT pg_get_viewdef('vsubdepartment'::regclass, true);
subdepartment sd +
WHERE d.parent_department = sd.id+
) +
- SELECT subdepartment.id, +
- subdepartment.parent_department, +
- subdepartment.name +
+ SELECT id, +
+ parent_department, +
+ name +
FROM subdepartment;
(1 row)
@@ -454,7 +454,7 @@ View definition:
FROM t t_1
WHERE t_1.n < 100
)
- SELECT sum(t.n) AS sum
+ SELECT sum(n) AS sum
FROM t;
-- corner case in which sub-WITH gets initialized first
@@ -645,6 +645,21 @@ SELECT t1.id, t2.path, t2 FROM t AS t1 JOIN t AS t2 ON
14 | {2,4,9,14} | (14,"{2,4,9,14}")
(16 rows)
+CREATE TEMP TABLE duplicates (a INT NOT NULL);
+INSERT INTO duplicates VALUES(1), (1);
+-- Try out a recursive UNION case where the non-recursive part's table slot
+-- uses TTSOpsBufferHeapTuple and contains duplicate rows.
+WITH RECURSIVE cte (a) as (
+ SELECT a FROM duplicates
+ UNION
+ SELECT a FROM cte
+)
+SELECT a FROM cte;
+ a
+---
+ 1
+(1 row)
+
-- SEARCH clause
create table graph0( f int, t int, label text );
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'f' as the Apache Cloudberry data distribution key for this table.
@@ -799,6 +814,75 @@ select * from search_graph order by seq;
4 | 5 | arc 4 -> 5 | (1,4,5)
(7 rows)
+-- a constant initial value causes issues for EXPLAIN
+explain (verbose, costs off)
+with recursive test as (
+ select 1 as x
+ union all
+ select x + 1
+ from test
+) search depth first by x set y
+select * from test limit 5;
+QUERY PLAN
+___________
+ Limit
+ Output: (1), (ARRAY[ROW(1)])
+ -> Recursive Union
+ -> Result
+ Output: 1, ARRAY[ROW(1)]
+ -> WorkTable Scan on test
+ Output: (test.x + 1), array_cat(test.y, ARRAY[ROW((test.x + 1))])
+
+with recursive test as (
+ select 1 as x
+ union all
+ select x + 1
+ from test
+) search depth first by x set y
+select * from test limit 5;
+ x | y
+---+-----------------------
+ 1 | {(1)}
+ 2 | {(1),(2)}
+ 3 | {(1),(2),(3)}
+ 4 | {(1),(2),(3),(4)}
+ 5 | {(1),(2),(3),(4),(5)}
+(5 rows)
+
+explain (verbose, costs off)
+with recursive test as (
+ select 1 as x
+ union all
+ select x + 1
+ from test
+) search breadth first by x set y
+select * from test limit 5;
+QUERY PLAN
+___________
+ Limit
+ Output: (1), (ROW('0'::bigint, 1))
+ -> Recursive Union
+ -> Result
+ Output: 1, ROW('0'::bigint, 1)
+ -> WorkTable Scan on test
+ Output: (test.x + 1), ROW(int8inc((test.y)."*DEPTH*"), (test.x + 1))
+
+with recursive test as (
+ select 1 as x
+ union all
+ select x + 1
+ from test
+) search breadth first by x set y
+select * from test limit 5;
+ x | y
+---+-------
+ 1 | (0,1)
+ 2 | (1,2)
+ 3 | (2,3)
+ 4 | (3,4)
+ 5 | (4,5)
+(5 rows)
+
-- various syntax errors
with recursive search_graph(f, t, label) as (
select * from graph0 g
@@ -891,9 +975,9 @@ select pg_get_viewdef('v_search');
search_graph sg +
WHERE (g.f = sg.t) +
) SEARCH DEPTH FIRST BY f, t SET seq +
- SELECT search_graph.f, +
- search_graph.t, +
- search_graph.label +
+ SELECT f, +
+ t, +
+ label +
FROM search_graph;
(1 row)
@@ -1038,6 +1122,35 @@ select * from search_graph order by path;
(25 rows)
-- CYCLE clause
+explain (verbose, costs off)
+with recursive search_graph(f, t, label) as (
+ select * from graph g
+ union all
+ select g.*
+ from graph g, search_graph sg
+ where g.f = sg.t
+) cycle f, t set is_cycle using path
+select * from search_graph;
+QUERY PLAN
+___________
+ Gather Motion 3:1 (slice1; segments: 3)
+ Output: g.f, g.t, g.label, (false), (ARRAY[ROW(g.f, g.t)])
+ -> Recursive Union
+ -> Seq Scan on pg_temp.graph g
+ Output: g.f, g.t, g.label, false, ARRAY[ROW(g.f, g.t)]
+ -> Hash Join
+ Output: g_1.f, g_1.t, g_1.label, CASE WHEN (ROW(g_1.f, g_1.t) = ANY (sg.path)) THEN true ELSE false END, array_cat(sg.path, ARRAY[ROW(g_1.f, g_1.t)])
+ Hash Cond: (sg.t = g_1.f)
+ -> WorkTable Scan on search_graph sg
+ Output: sg.f, sg.t, sg.label, sg.is_cycle, sg.path
+ Filter: (NOT sg.is_cycle)
+ -> Hash
+ Output: g_1.f, g_1.t, g_1.label
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ Output: g_1.f, g_1.t, g_1.label
+ -> Seq Scan on pg_temp.graph g_1
+ Output: g_1.f, g_1.t, g_1.label
+
with recursive search_graph(f, t, label) as (
select * from graph g
union all
@@ -1112,6 +1225,70 @@ select * from search_graph;
2 | 3 | arc 2 -> 3 | N | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"}
(25 rows)
+explain (verbose, costs off)
+with recursive test as (
+ select 0 as x
+ union all
+ select (x + 1) % 10
+ from test
+) cycle x set is_cycle using path
+select * from test;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------
+ Recursive Union
+ -> Result
+ Output: 0, false, ARRAY[ROW(0)]
+ -> WorkTable Scan on test
+ Output: ((test.x + 1) % 10), CASE WHEN (ROW(((test.x + 1) % 10)) = ANY (test.path)) THEN true ELSE false END, array_cat(test.path, ARRAY[ROW(((test.x + 1) % 10))])
+ Filter: (NOT test.is_cycle)
+ Optimizer: Postgres query optimizer
+(8 rows)
+
+with recursive test as (
+ select 0 as x
+ union all
+ select (x + 1) % 10
+ from test
+) cycle x set is_cycle using path
+select * from test;
+ x | is_cycle | path
+---+----------+-----------------------------------------------
+ 0 | f | {(0)}
+ 0 | t | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(0)}
+ 1 | f | {(0),(1)}
+ 2 | f | {(0),(1),(2)}
+ 3 | f | {(0),(1),(2),(3)}
+ 4 | f | {(0),(1),(2),(3),(4)}
+ 5 | f | {(0),(1),(2),(3),(4),(5)}
+ 6 | f | {(0),(1),(2),(3),(4),(5),(6)}
+ 7 | f | {(0),(1),(2),(3),(4),(5),(6),(7)}
+ 8 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8)}
+ 9 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9)}
+(11 rows)
+
+with recursive test as (
+ select 0 as x
+ union all
+ select (x + 1) % 10
+ from test
+ where not is_cycle -- redundant, but legal
+) cycle x set is_cycle using path
+select * from test;
+ x | is_cycle | path
+---+----------+-----------------------------------------------
+ 0 | f | {(0)}
+ 0 | t | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(0)}
+ 1 | f | {(0),(1)}
+ 2 | f | {(0),(1),(2)}
+ 3 | f | {(0),(1),(2),(3)}
+ 4 | f | {(0),(1),(2),(3),(4)}
+ 5 | f | {(0),(1),(2),(3),(4),(5)}
+ 6 | f | {(0),(1),(2),(3),(4),(5),(6)}
+ 7 | f | {(0),(1),(2),(3),(4),(5),(6),(7)}
+ 8 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8)}
+ 9 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9)}
+(11 rows)
+
-- multiple CTEs
with recursive
graph(f, t, label) as (
@@ -1163,7 +1340,7 @@ select f, t, label from search_graph;
with recursive a as (
select 1 as b
union all
- select * from a
+ select * from a
) cycle b set c using p
select * from a;
b | c | p
@@ -1384,9 +1561,9 @@ select pg_get_viewdef('v_cycle1');
search_graph sg +
WHERE (g.f = sg.t) +
) CYCLE f, t SET is_cycle USING path +
- SELECT search_graph.f, +
- search_graph.t, +
- search_graph.label +
+ SELECT f, +
+ t, +
+ label +
FROM search_graph;
(1 row)
@@ -1406,9 +1583,9 @@ select pg_get_viewdef('v_cycle2');
search_graph sg +
WHERE (g.f = sg.t) +
) CYCLE f, t SET is_cycle TO 'Y'::text DEFAULT 'N'::text USING path+
- SELECT search_graph.f, +
- search_graph.t, +
- search_graph.label +
+ SELECT f, +
+ t, +
+ label +
FROM search_graph;
(1 row)
@@ -1869,6 +2046,54 @@ WITH RECURSIVE x(n) AS (
ERROR: window functions in the target list of a recursive query is not supported
LINE 4: SELECT level+1, row_number() over() FROM x, bar)
^
+-- allow this, because we historically have
+WITH RECURSIVE x(n) AS (
+ WITH x1 AS (SELECT 1 AS n)
+ SELECT 0
+ UNION
+ SELECT * FROM x1)
+ SELECT * FROM x;
+ n
+---
+ 0
+ 1
+(2 rows)
+
+-- but this should be rejected
+WITH RECURSIVE x(n) AS (
+ WITH x1 AS (SELECT 1 FROM x)
+ SELECT 0
+ UNION
+ SELECT * FROM x1)
+ SELECT * FROM x;
+ERROR: recursive reference to query "x" must not appear within a subquery
+LINE 2: WITH x1 AS (SELECT 1 FROM x)
+ ^
+-- and this too
+WITH RECURSIVE x(n) AS (
+ (WITH x1 AS (SELECT 1 FROM x) SELECT * FROM x1)
+ UNION
+ SELECT 0)
+ SELECT * FROM x;
+ERROR: recursive reference to query "x" must not appear within its non-recursive term
+LINE 2: (WITH x1 AS (SELECT 1 FROM x) SELECT * FROM x1)
+ ^
+-- and this
+WITH RECURSIVE x(n) AS (
+ SELECT 0 UNION SELECT 1
+ ORDER BY (SELECT n FROM x))
+ SELECT * FROM x;
+ERROR: ORDER BY in a recursive query is not implemented
+LINE 3: ORDER BY (SELECT n FROM x))
+ ^
+-- and this
+WITH RECURSIVE x(n) AS (
+ WITH sub_cte AS (SELECT * FROM x)
+ DELETE FROM graph RETURNING f)
+ SELECT * FROM x;
+ERROR: recursive query "x" must not contain data-modifying statements
+LINE 1: WITH RECURSIVE x(n) AS (
+ ^
CREATE TEMPORARY TABLE y (a INTEGER) DISTRIBUTED RANDOMLY;
INSERT INTO y SELECT generate_series(1, 10);
-- LEFT JOIN
@@ -2170,7 +2395,7 @@ WITH outermost(x) AS (
)
SELECT * FROM outermost ORDER BY 1;
ERROR: relation "outermost" does not exist
-LINE 4: SELECT * FROM outermost
+LINE 4: SELECT * FROM outermost -- fail
^
DETAIL: There is a WITH item named "outermost", but it cannot be referenced from this part of the query.
HINT: Use WITH RECURSIVE, or re-order the WITH items to remove forward references.
@@ -2194,7 +2419,7 @@ WITH RECURSIVE outermost(x) AS (
)
SELECT * FROM outermost ORDER BY 1;
ERROR: recursive reference to query "outermost" must not appear within a subquery
-LINE 2: WITH innermost as (SELECT 2 FROM outermost)
+LINE 2: WITH innermost as (SELECT 2 FROM outermost) -- fail
^
--
-- This test will fail with the old implementation of PARAM_EXEC parameter
@@ -2581,14 +2806,6 @@ INSERT INTO bug6051 SELECT * FROM t1;
ERROR: writable CTE queries cannot be themselves writable
DETAIL: Apache Cloudberry currently only support CTEs with one writable clause, called in a non-writable context.
HINT: Rewrite the query to only include one writable clause.
-SELECT * FROM bug6051;
- i
----
- 1
- 2
- 3
-(3 rows)
-
CREATE TEMP TABLE bug6051_2 (i int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
@@ -2608,12 +2825,7 @@ SELECT * FROM bug6051;
3
(3 rows)
-SELECT * FROM bug6051_2;
- i
----
-(0 rows)
-
--- check INSERT...SELECT rule actions are disallowed on commands
+-- check INSERT ... SELECT rule actions are disallowed on commands
-- that have modifyingCTEs
CREATE OR REPLACE RULE bug6051_ins AS ON INSERT TO bug6051 DO INSTEAD
INSERT INTO bug6051_2
@@ -2629,7 +2841,7 @@ CREATE TEMP TABLE bug6051_3 AS
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entry.
CREATE RULE bug6051_3_ins AS ON INSERT TO bug6051_3 DO INSTEAD
SELECT i FROM bug6051_2;
-BEGIN; SET LOCAL force_parallel_mode = on;
+BEGIN; SET LOCAL debug_parallel_query = on;
WITH t1 AS ( DELETE FROM bug6051_3 RETURNING * )
INSERT INTO bug6051_3 SELECT * FROM t1;
ERROR: writable CTE queries cannot be themselves writable
@@ -2899,9 +3111,181 @@ UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = withz.k)
RETURNING k, v;
ERROR: modification of distribution columns in OnConflictUpdate is not supported
DROP TABLE withz;
+-- WITH referenced by MERGE statement
+CREATE TABLE m AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i;
+ALTER TABLE m ADD UNIQUE (k);
+ERROR: UNIQUE and DISTRIBUTED RANDOMLY are incompatible
+WITH RECURSIVE cte_basic AS (SELECT 1 a, 'cte_basic val' b)
+MERGE INTO m USING (select 0 k, 'merge source SubPlan' v) o ON m.k=o.k
+WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1)
+WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
+ERROR: WITH RECURSIVE is not supported for MERGE statement
+-- Basic:
+WITH cte_basic AS MATERIALIZED (SELECT 1 a, 'cte_basic val' b)
+MERGE INTO m USING (select 0 k, 'merge source SubPlan' v offset 0) o ON m.k=o.k
+WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1)
+WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
+-- Examine
+SELECT * FROM m where k = 0;
+ k | v
+---+---
+(0 rows)
+
+-- See EXPLAIN output for same query:
+EXPLAIN (VERBOSE, COSTS OFF)
+WITH cte_basic AS MATERIALIZED (SELECT 1 a, 'cte_basic val' b)
+MERGE INTO m USING (select 0 k, 'merge source SubPlan' v offset 0) o ON m.k=o.k
+WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1)
+WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
+QUERY PLAN
+___________
+ Merge on public.m
+ -> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
+ Output: m.ctid, m.gp_segment_id, o.k, o.v, o.*
+ -> Split Merge
+ Output: m.ctid, m.gp_segment_id, o.k, o.v, o.*
+ -> Hash Right Join
+ Output: m.ctid, m.gp_segment_id, o.k, o.v, o.*
+ Hash Cond: (m.k = o.k)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Output: m.ctid, m.gp_segment_id, m.k
+ Hash Key: m.k
+ -> Seq Scan on public.m
+ Output: m.ctid, m.gp_segment_id, m.k
+ -> Hash
+ Output: o.k, o.v, o.*
+ -> Redistribute Motion 1:3 (slice3; segments: 1)
+ Output: o.k, o.v, o.*
+ Hash Key: o.k
+ -> Subquery Scan on o
+ Output: o.k, o.v, o.*
+ -> Result
+ Output: 0, 'merge source SubPlan'::text
+ SubPlan 1
+ -> Limit
+ Output: ((share0_ref1.b || ' merge update'::text))
+ -> Result
+ Output: (share0_ref1.b || ' merge update'::text)
+ Filter: (share0_ref1.a = m.k)
+ -> Shared Scan (share slice:id 0:0)
+ Output: share0_ref1.a, share0_ref1.b
+ -> Result
+ Output: 1, 'cte_basic val'::text
+
+-- InitPlan
+WITH cte_init AS MATERIALIZED (SELECT 1 a, 'cte_init val' b)
+MERGE INTO m USING (select 1 k, 'merge source InitPlan' v offset 0) o ON m.k=o.k
+WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_init WHERE a = 1 LIMIT 1)
+WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
+-- Examine
+SELECT * FROM m where k = 1;
+ k | v
+---+---------------------------
+ 1 | cte_init val merge update
+(1 row)
+
+-- See EXPLAIN output for same query:
+EXPLAIN (VERBOSE, COSTS OFF)
+WITH cte_init AS MATERIALIZED (SELECT 1 a, 'cte_init val' b)
+MERGE INTO m USING (select 1 k, 'merge source InitPlan' v offset 0) o ON m.k=o.k
+WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_init WHERE a = 1 LIMIT 1)
+WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
+QUERY PLAN
+___________
+ Merge on public.m
+ InitPlan 1 (returns $0) (slice4)
+ -> Limit
+ Output: ((cte_init.b || ' merge update'::text))
+ -> Subquery Scan on cte_init
+ Output: (cte_init.b || ' merge update'::text)
+ Filter: (cte_init.a = 1)
+ -> Shared Scan (share slice:id 4:0)
+ Output: share0_ref1.a, share0_ref1.b
+ -> Result
+ Output: 1, 'cte_init val'::text
+ -> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
+ Output: m.ctid, m.gp_segment_id, o.k, o.v, o.*
+ -> Split Merge
+ Output: m.ctid, m.gp_segment_id, o.k, o.v, o.*
+ -> Hash Right Join
+ Output: m.ctid, m.gp_segment_id, o.k, o.v, o.*
+ Hash Cond: (m.k = o.k)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Output: m.ctid, m.gp_segment_id, m.k
+ Hash Key: m.k
+ -> Seq Scan on public.m
+ Output: m.ctid, m.gp_segment_id, m.k
+ -> Hash
+ Output: o.k, o.v, o.*
+ -> Redistribute Motion 1:3 (slice3; segments: 1)
+ Output: o.k, o.v, o.*
+ Hash Key: o.k
+ -> Subquery Scan on o
+ Output: o.k, o.v, o.*
+ -> Result
+ Output: 1, 'merge source InitPlan'::text
+
+-- MERGE source comes from CTE:
+WITH merge_source_cte AS MATERIALIZED (SELECT 15 a, 'merge_source_cte val' b)
+MERGE INTO m USING (select * from merge_source_cte) o ON m.k=o.a
+WHEN MATCHED THEN UPDATE SET v = (SELECT b || merge_source_cte.*::text || ' merge update' FROM merge_source_cte WHERE a = 15)
+WHEN NOT MATCHED THEN INSERT VALUES(o.a, o.b || (SELECT merge_source_cte.*::text || ' merge insert' FROM merge_source_cte));
+-- Examine
+SELECT * FROM m where k = 15;
+ k | v
+---+---
+(0 rows)
+
+-- See EXPLAIN output for same query:
+EXPLAIN (VERBOSE, COSTS OFF)
+WITH merge_source_cte AS MATERIALIZED (SELECT 15 a, 'merge_source_cte val' b)
+MERGE INTO m USING (select * from merge_source_cte) o ON m.k=o.a
+WHEN MATCHED THEN UPDATE SET v = (SELECT b || merge_source_cte.*::text || ' merge update' FROM merge_source_cte WHERE a = 15)
+WHEN NOT MATCHED THEN INSERT VALUES(o.a, o.b || (SELECT merge_source_cte.*::text || ' merge insert' FROM merge_source_cte));
+QUERY PLAN
+___________
+ Merge on public.m
+ InitPlan 1 (returns $0) (slice4)
+ -> Subquery Scan on merge_source_cte
+ Output: ((merge_source_cte.b || (merge_source_cte.*)::text) || ' merge update'::text)
+ Filter: (merge_source_cte.a = 15)
+ -> Shared Scan (share slice:id 4:0)
+ Output: share0_ref1.a, share0_ref1.b
+ -> Result
+ Output: 15, 'merge_source_cte val'::text
+ InitPlan 2 (returns $1) (slice5)
+ -> Subquery Scan on merge_source_cte_1
+ Output: ((merge_source_cte_1.*)::text || ' merge insert'::text)
+ -> Shared Scan (share slice:id 5:0)
+ Output: share0_ref2.a, share0_ref2.b
+ -> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
+ Output: m.ctid, m.gp_segment_id, o.a, o.b, o.*
+ -> Split Merge
+ Output: m.ctid, m.gp_segment_id, o.a, o.b, o.*
+ -> Hash Right Join
+ Output: m.ctid, m.gp_segment_id, o.a, o.b, o.*
+ Hash Cond: (m.k = o.a)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Output: m.ctid, m.gp_segment_id, m.k
+ Hash Key: m.k
+ -> Seq Scan on public.m
+ Output: m.ctid, m.gp_segment_id, m.k
+ -> Hash
+ Output: o.a, o.b, o.*
+ -> Redistribute Motion 1:3 (slice3)
+ Output: o.a, o.b, o.*
+ Hash Key: o.a
+ -> Subquery Scan on o
+ Output: o.a, o.b, o.*
+ -> Shared Scan (share slice:id 3:0)
+ Output: share0_ref3.a, share0_ref3.b
+
+DROP TABLE m;
-- check that run to completion happens in proper ordering
TRUNCATE TABLE y;
INSERT INTO y SELECT generate_series(1, 3);
+LINE 1: INSERT INTO y SELECT generate_series(1, 3);
+ ^
CREATE TEMPORARY TABLE yy (a INTEGER);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
@@ -2915,13 +3299,6 @@ ERROR: only one modifying WITH clause allowed per query
DETAIL: Apache Cloudberry currently only support CTEs with one writable clause.
HINT: Rewrite the query to only include one writable CTE clause.
SELECT * FROM y;
- a
----
- 1
- 3
- 2
-(3 rows)
-
SELECT * FROM yy;
a
---
@@ -2937,15 +3314,8 @@ ERROR: only one modifying WITH clause allowed per query
DETAIL: Apache Cloudberry currently only support CTEs with one writable clause.
HINT: Rewrite the query to only include one writable CTE clause.
SELECT * FROM y;
- a
----
- 1
- 3
- 2
-(3 rows)
-
SELECT * FROM yy;
- a
+ a
---
(0 rows)
@@ -3162,7 +3532,7 @@ SELECT * FROM parent;
-- check EXPLAIN VERBOSE for a wCTE with RETURNING
EXPLAIN (VERBOSE, COSTS OFF)
WITH wcte AS ( INSERT INTO int8_tbl VALUES ( 42, 47 ) RETURNING q2 )
-DELETE FROM a USING wcte WHERE aa = q2;
+DELETE FROM a_star USING wcte WHERE aa = q2;
ERROR: writable CTE queries cannot be themselves writable
DETAIL: Apache Cloudberry currently only support CTEs with one writable clause, called in a non-writable context.
HINT: Rewrite the query to only include one writable clause.
@@ -3181,50 +3551,66 @@ WITH t AS (
INSERT INTO y VALUES(0)
)
SELECT * FROM t;
-ERROR: WITH query "t" does not have a RETURNING clause
-LINE 4: SELECT * FROM t;
- ^
+ERROR: relation "y" does not exist
+LINE 2: INSERT INTO y VALUES(0)
+ ^
-- data-modifying WITH allowed only at the top level
SELECT * FROM (
WITH t AS (UPDATE y SET a=a+1 RETURNING *)
SELECT * FROM t
) ss;
-ERROR: WITH clause containing a data-modifying statement must be at the top level
+ERROR: relation "y" does not exist
LINE 2: WITH t AS (UPDATE y SET a=a+1 RETURNING *)
- ^
+ ^
-- most variants of rules aren't allowed
CREATE RULE y_rule AS ON INSERT TO y WHERE a=0 DO INSTEAD DELETE FROM y;
+ERROR: relation "y" does not exist
WITH t AS (
INSERT INTO y VALUES(0)
)
VALUES(FALSE);
-ERROR: conditional DO INSTEAD rules are not supported for data-modifying statements in WITH
+ERROR: relation "y" does not exist
+LINE 2: INSERT INTO y VALUES(0)
+ ^
CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO INSTEAD NOTHING;
+ERROR: relation "y" does not exist
WITH t AS (
INSERT INTO y VALUES(0)
)
VALUES(FALSE);
-ERROR: DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH
+ERROR: relation "y" does not exist
+LINE 2: INSERT INTO y VALUES(0)
+ ^
CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO INSTEAD NOTIFY foo;
+ERROR: relation "y" does not exist
WITH t AS (
INSERT INTO y VALUES(0)
)
VALUES(FALSE);
-ERROR: DO INSTEAD NOTIFY rules are not supported for data-modifying statements in WITH
+ERROR: relation "y" does not exist
+LINE 2: INSERT INTO y VALUES(0)
+ ^
CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO ALSO NOTIFY foo;
+ERROR: relation "y" does not exist
WITH t AS (
INSERT INTO y VALUES(0)
)
VALUES(FALSE);
-ERROR: DO ALSO rules are not supported for data-modifying statements in WITH
+ERROR: relation "y" does not exist
+LINE 2: INSERT INTO y VALUES(0)
+ ^
CREATE OR REPLACE RULE y_rule AS ON INSERT TO y
DO INSTEAD (NOTIFY foo; NOTIFY bar);
+ERROR: relation "y" does not exist
WITH t AS (
INSERT INTO y VALUES(0)
)
VALUES(FALSE);
-ERROR: multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH
+ERROR: relation "y" does not exist
+LINE 2: INSERT INTO y VALUES(0)
+ ^
DROP RULE y_rule ON y;
+ERROR: relation "y" does not exist
-- check that parser lookahead for WITH doesn't cause any odd behavior
create table foo (with baz); -- fail, WITH is a reserved word
ERROR: syntax error at or near "with"
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index 442fbd0a7f4..18ad49e41b6 100644
--- a/src/test/regress/sql/privileges.sql
+++ b/src/test/regress/sql/privileges.sql
@@ -1272,30 +1272,7 @@ BEGIN
EXCEPTION WHEN OTHERS THEN
RETURN 2;
END$$;
-CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c;
--- start_ignore
--- GPDB_14_MERGE_FIXME: the following command will abort the sub-transaction
--- in a DDL. The problem is that aborting the sub-transaction will also erase
--- the `dispatch_oids` needed by the QEs. It's a rare case.
--- We don't support this case now
-CREATE UNIQUE INDEX ON sro_index_mv (c) WHERE unwanted_grant_nofail(1) > 0;
--- end_ignore
-\c -
--- REFRESH MATERIALIZED VIEW CONCURRENTLY sro_index_mv;
-REFRESH MATERIALIZED VIEW sro_index_mv;
-
--- REFRESH MATERIALIZED VIEW CONCURRENTLY use of eval_const_expressions()
-SET SESSION AUTHORIZATION regress_sro_user;
-CREATE FUNCTION unwanted_grant_nofail(int) RETURNS int
- IMMUTABLE LANGUAGE plpgsql AS $$
-BEGIN
- PERFORM unwanted_grant();
- RAISE WARNING 'owned';
- RETURN 1;
-EXCEPTION WHEN OTHERS THEN
- RETURN 2;
-END$$;
-CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c;
+CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c DISTRIBUTED BY (c);
CREATE UNIQUE INDEX ON sro_index_mv (c) WHERE unwanted_grant_nofail(1) > 0;
\c -
REFRESH MATERIALIZED VIEW CONCURRENTLY sro_index_mv;
diff --git a/src/test/regress/sql/qp_query_execution.sql b/src/test/regress/sql/qp_query_execution.sql
index 1a6ecda40aa..5544dbee94f 100644
--- a/src/test/regress/sql/qp_query_execution.sql
+++ b/src/test/regress/sql/qp_query_execution.sql
@@ -156,7 +156,7 @@ analyze foo_p;
analyze bar;
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6;', 'Nested Loop Left Join', 'Hash Right Join');
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6 order by 1, 2 desc limit 10;
select qx_count_operator('select foo_p.k, foo_p.t from foo_p left outer join bar on foo_p.k = bar.k where foo_p.t is not null and foo_p.p = 6;', 'Hash Right Join', 'Hash Right Join');
@@ -212,7 +212,7 @@ analyze b;
select qx_count_operator('select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t is not null and abbp.p = 6;', 'Hash Right Join', 'Hash Right Join');
select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t is not null and abbp.p = 6 order by 1, 2 desc limit 10;
-select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Nested Loop Left Join', 'Hash Right Join');
select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = '6SOME NUMBER' order by 1, 2 desc limit 10;
-- Varchar in the select list with a broadcast on top of an append with flow node
@@ -253,7 +253,7 @@ analyze b;
select qx_count_operator('select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t is not null and abbp.p = 6;', 'Hash Right Join', 'Hash Left Join');
select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t is not null and abbp.p = 6 order by 1, 2 desc limit 10;
-select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = 6;', 'Hash Right Join', 'Hash Left Join');
+select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = 6;', 'Nested Loop Left Join', 'Hash Left Join');
select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = 6 order by 1, 2 asc limit 10;
-- Partitioned tables with decimal type distribution keys
@@ -277,7 +277,7 @@ analyze bar;
select qx_count_operator('select foo_p.k, foo_p.t from foo_p left outer join bar on foo_p.k = bar.k where foo_p.t is not null and foo_p.p = 6;', 'Hash Right Join', 'Hash Right Join');
select foo_p.k, foo_p.t from foo_p left outer join bar on foo_p.k = bar.k where foo_p.t is not null and foo_p.p = 6 order by 1, 2 desc limit 10;
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6.00;', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6.00;', 'Nested Loop Left Join', 'Hash Right Join');
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6.00 order by 1, 2 desc limit 10;
-- Partitioned tables with character type distribution keys used in predicates
@@ -301,7 +301,7 @@ analyze b;
select qx_count_operator('select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t is not null and abbp.p = 6;', 'Hash Right Join', 'Hash Right Join');
select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t is not null and abbp.p = 6 order by 1, 2 asc limit 10;
-select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Nested Loop Left Join', 'Hash Right Join');
select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = '6SOME NUMBER' order by 1, 2 asc limit 10;
-- Partitioned tables on both sides of a join
@@ -325,7 +325,7 @@ analyze bar_p;
select qx_count_operator('select foo_p.k, foo_p.t from foo_p left outer join bar_p on foo_p.k = bar_p.k where foo_p.t is not null and foo_p.p = 6;', 'Hash Right Join', 'Hash Right Join');
select foo_p.k, foo_p.t from foo_p left outer join bar_p on foo_p.k = bar_p.k where foo_p.t is not null and foo_p.p = 6 order by 1, 2 desc limit 10;
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k where foo_p.t is not null and foo_p.a = 6;', 'Nested Loop Left Join', 'Hash Right Join');
select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k where foo_p.t is not null and foo_p.a = 6 order by 1, 2 asc limit 10;
select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k and foo_p.k = bar_p.k where foo_p.t is not null and foo_p.a = 6 and bar_p.a = 14;', 'Nested Loop', 'Hash Join');
@@ -343,7 +343,7 @@ select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k a
select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k and foo_p.b = bar_p.b where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k and foo_p.b = bar_p.b where foo_p.t is not null and foo_p.a = 6 order by 1, 2 desc limit 10;
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.a where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.a where foo_p.t is not null and foo_p.a = 6;', 'Nested Loop Left Join', 'Hash Right Join');
select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.a where foo_p.t is not null and foo_p.a = 6 order by 1, 2 asc limit 10;
-- Queries where equality predicate is not an immediate constant
@@ -363,11 +363,11 @@ insert into bar select i % 7, i % 6, i % 9, i || 'SOME NUMBER', i % 4 from gener
analyze foo_p;
analyze bar;
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = (array[1])[1];', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = (array[1])[1];', 'Nested Loop Left Join', 'Hash Right Join');
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = (array[1])[1] order by 1, 2 desc limit 10;
create function mytest(integer) returns integer as 'select $1/100' language sql;
-select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = mytest(100);', 'Hash Right Join', 'Hash Right Join');
+select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = mytest(100);', 'Nested Loop Left Join', 'Hash Right Join');
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = mytest(100) order by 1, 2 asc limit 10;
drop function if exists mytest(integer);
diff --git a/src/test/regress/sql/random.sql b/src/test/regress/sql/random.sql
index 14cc76bc3c6..44a9a19669c 100644
--- a/src/test/regress/sql/random.sql
+++ b/src/test/regress/sql/random.sql
@@ -38,7 +38,7 @@ BEGIN
WITH samples AS (
SELECT random() r FROM generate_series(1, n) ORDER BY 1
), indexed_samples AS (
- SELECT (row_number() OVER())-1.0 i, r FROM samples
+ SELECT (row_number(ORDER BY r) OVER())-1.0 i, r FROM samples
)
SELECT max(abs(i/n-r)) < c / sqrt(n) FROM indexed_samples
);
@@ -84,7 +84,7 @@ BEGIN
WITH samples AS (
SELECT random_normal() r FROM generate_series(1, n) ORDER BY 1
), indexed_samples AS (
- SELECT (row_number() OVER())-1.0 i, r FROM samples
+ SELECT (row_number() OVER(ORDER BY r))-1.0 i, r FROM samples
)
SELECT max(abs((1+erf(r/sqrt(2)))/2 - i/n)) < c / sqrt(n)
FROM indexed_samples