diff --git a/configure.ac b/configure.ac
index 246edc4846e..9c4a2efad6c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -21,7 +21,7 @@ dnl The PACKAGE_VERSION from upstream PostgreSQL is maintained in the
dnl PG_PACKAGE_VERSION variable, when merging make sure to update this
dnl variable with the merge conflict from the AC_INIT() statement.
AC_INIT([Apache Cloudberry], [3.0.0-devel], [dev@cloudberry.apache.org], [], [https://cloudberry.apache.org/])
-[PG_PACKAGE_VERSION=14.4]
+[PG_PACKAGE_VERSION=14.5]
AC_SUBST(PG_PACKAGE_VERSION)
dnl m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required.
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 655bdced4f1..2874e69c7e5 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -6486,6 +6486,29 @@ SELECT * FROM foreign_tbl;
20 | 30
(1 row)
+-- We don't allow batch insert when there are any WCO constraints
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15), (0, 5);
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Insert on public.foreign_tbl
+ Remote SQL: INSERT INTO public.base_tbl(a, b) VALUES ($1, $2) RETURNING a, b
+ Batch Size: 1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1, "*VALUES*".column2
+(5 rows)
+
+INSERT INTO rw_view VALUES (0, 15), (0, 5); -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (10, 5).
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 20 | 30
+(1 row)
+
+ALTER SERVER loopback OPTIONS (DROP batch_size);
DROP FOREIGN TABLE foreign_tbl CASCADE;
NOTICE: drop cascades to view rw_view
DROP TRIGGER row_before_insupd_trigger ON base_tbl;
@@ -6578,6 +6601,27 @@ SELECT * FROM foreign_tbl;
20 | 30
(1 row)
+-- We don't allow batch insert when there are any WCO constraints
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15), (0, 5);
+ QUERY PLAN
+--------------------------------------------------------
+ Insert on public.parent_tbl
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1, "*VALUES*".column2
+(3 rows)
+
+INSERT INTO rw_view VALUES (0, 15), (0, 5); -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (10, 5).
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 20 | 30
+(1 row)
+
+ALTER SERVER loopback OPTIONS (DROP batch_size);
DROP FOREIGN TABLE foreign_tbl CASCADE;
DROP TRIGGER row_before_insupd_trigger ON child_tbl;
DROP TABLE parent_tbl CASCADE;
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index fbbd867c239..e6ea4cc655c 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -2046,8 +2046,9 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
batch_size = get_batch_size_option(resultRelInfo->ri_RelationDesc);
/*
- * Disable batching when we have to use RETURNING or there are any
- * BEFORE/AFTER ROW INSERT triggers on the foreign table.
+ * Disable batching when we have to use RETURNING, there are any
+ * BEFORE/AFTER ROW INSERT triggers on the foreign table, or there are any
+ * WITH CHECK OPTION constraints from parent views.
*
* When there are any BEFORE ROW INSERT triggers on the table, we can't
* support it, because such triggers might query the table we're inserting
@@ -2055,6 +2056,7 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
* and prepared for insertion are not there.
*/
if (resultRelInfo->ri_projectReturning != NULL ||
+ resultRelInfo->ri_WithCheckOptions != NIL ||
(resultRelInfo->ri_TrigDesc &&
(resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
resultRelInfo->ri_TrigDesc->trig_insert_after_row)))
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index 724c1802b28..3663ca3bdf4 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -1442,6 +1442,14 @@ UPDATE rw_view SET b = b + 15;
UPDATE rw_view SET b = b + 15; -- ok
SELECT * FROM foreign_tbl;
+-- We don't allow batch insert when there are any WCO constraints
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15), (0, 5);
+INSERT INTO rw_view VALUES (0, 15), (0, 5); -- should fail
+SELECT * FROM foreign_tbl;
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+
DROP FOREIGN TABLE foreign_tbl CASCADE;
DROP TRIGGER row_before_insupd_trigger ON base_tbl;
DROP TABLE base_tbl;
@@ -1480,6 +1488,14 @@ UPDATE rw_view SET b = b + 15;
UPDATE rw_view SET b = b + 15; -- ok
SELECT * FROM foreign_tbl;
+-- We don't allow batch insert when there are any WCO constraints
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15), (0, 5);
+INSERT INTO rw_view VALUES (0, 15), (0, 5); -- should fail
+SELECT * FROM foreign_tbl;
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+
DROP FOREIGN TABLE foreign_tbl CASCADE;
DROP TRIGGER row_before_insupd_trigger ON child_tbl;
DROP TABLE parent_tbl CASCADE;
diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml
index 4ee8908b65a..71697155d7c 100644
--- a/doc/src/sgml/brin.sgml
+++ b/doc/src/sgml/brin.sgml
@@ -75,7 +75,7 @@
summarized will cause the summary information to be updated with data
from the new tuples.
When a new page is created that does not fall within the last
- summarized range, the range that the new page belongs into
+ summarized range, the range that the new page belongs to
does not automatically acquire a summary tuple;
those tuples remain unsummarized until a summarization run is
invoked later, creating the initial summary for that range.
diff --git a/doc/src/sgml/ref/drop_extension.sgml b/doc/src/sgml/ref/drop_extension.sgml
index 5e507dec928..484e5d9b11a 100644
--- a/doc/src/sgml/ref/drop_extension.sgml
+++ b/doc/src/sgml/ref/drop_extension.sgml
@@ -30,7 +30,10 @@ DROP EXTENSION [ IF EXISTS ] name [
DROP EXTENSION removes extensions from the database.
- Dropping an extension causes its component objects to be dropped as well.
+ Dropping an extension causes its member objects, and other explicitly
+ dependent routines (see ,
+ the DEPENDS ON EXTENSION extension_name
+ action), to be dropped as well.
@@ -77,9 +80,9 @@ DROP EXTENSION [ IF EXISTS ] name [
RESTRICT
- Refuse to drop the extension if any objects depend on it (other than
- its own member objects and other extensions listed in the same
- DROP command). This is the default.
+ This option prevents the specified extensions from being dropped if
+ other objects, besides these extensions, their members, and their
+ explicitly dependent routines, depend on them. This is the default.
diff --git a/doc/src/sgml/release-14.sgml b/doc/src/sgml/release-14.sgml
index 95a6bfe037f..b5f91109812 100644
--- a/doc/src/sgml/release-14.sgml
+++ b/doc/src/sgml/release-14.sgml
@@ -1,6 +1,942 @@
+
+ Release 14.5
+
+
+ Release date:
+ 2022-08-11
+
+
+
+ This release contains a variety of fixes from 14.4.
+ For information about new features in major release 14, see
+ .
+
+
+
+ Migration to Version 14.5
+
+
+ A dump/restore is not required for those running 14.X.
+
+
+
+ However, if you are upgrading from a version earlier than 14.4,
+ see .
+
+
+
+
+ Changes
+
+
+
+
+
+
+ Do not let extension scripts replace objects not already belonging
+ to the extension (Tom Lane)
+
+
+
+ This change prevents extension scripts from doing CREATE
+ OR REPLACE if there is an existing object that does not
+ belong to the extension. It also prevents CREATE IF NOT
+ EXISTS in the same situation. This prevents a form of
+ trojan-horse attack in which a hostile database user could become
+ the owner of an extension object and then modify it to compromise
+ future uses of the object by other users. As a side benefit, it
+ also reduces the risk of accidentally replacing objects one did
+ not mean to.
+
+
+
+ The PostgreSQL Project thanks
+ Sven Klemm for reporting this problem.
+ (CVE-2022-2625)
+
+
+
+
+
+
+ Fix replay of CREATE DATABASE WAL
+ records on standby servers
+ (Kyotaro Horiguchi, Asim R Praveen, Paul Guo)
+
+
+
+ Standby servers may encounter missing tablespace directories
+ when replaying database-creation WAL records. Prior to this
+ patch, a standby would fail to recover in such a case;
+ however, such directories could be legitimately missing.
+ Create the tablespace (as a plain directory), then check that it
+ has been dropped again once replay reaches a consistent state.
+
+
+
+
+
+
+ Support in place tablespaces
+ (Thomas Munro, Michael Paquier, Álvaro Herrera)
+
+
+
+ Normally a Postgres tablespace is a symbolic link to a directory on
+ some other filesystem. This change allows it to just be a plain
+ directory. While this has no use for separating tables onto
+ different filesystems, it is a convenient setup for testing.
+ Moreover, it is necessary to support the CREATE
+ DATABASE replay fix, which transiently creates a missing
+ tablespace as an in place tablespace.
+
+
+
+
+
+
+ Fix permissions checks in CREATE INDEX (Nathan
+ Bossart, Noah Misch)
+
+
+
+ The fix for CVE-2022-1552 caused CREATE INDEX to
+ apply the table owner's permissions while performing lookups of
+ operator classes and other objects, where formerly the calling
+ user's permissions were used. This broke dump/restore scenarios,
+ because pg_dump issues CREATE
+ INDEX before re-granting permissions.
+
+
+
+
+
+
+ In extended query protocol, force an immediate commit
+ after CREATE DATABASE and other commands that
+ can't run in a transaction block (Tom Lane)
+
+
+
+ If the client does not send a Sync message immediately after such a
+ command, but instead sends another command, any failure in that
+ command would lead to rolling back the preceding command, typically
+ leaving inconsistent state on-disk (such as a missing or extra
+ database directory). The mechanisms intended to prevent that
+ situation turn out to work for multiple commands in a simple-Query
+ message, but not for a series of extended-protocol messages. To
+ prevent inconsistency without breaking use-cases that work today,
+ force an implicit commit after such commands.
+
+
+
+
+
+
+ Fix race condition when checking transaction visibility (Simon Riggs)
+
+
+
+ TransactionIdIsInProgress could
+ report false before the subject transaction is
+ considered visible, leading to various misbehaviors. The race
+ condition window is normally very narrow, but use of synchronous
+ replication makes it much wider, because the wait for a synchronous
+ replica happens in that window.
+
+
+
+
+
+
+ Fix incorrect plans when sorting by an expression that contains a
+ non-top-level set-returning function (Richard Guo, Tom Lane)
+
+
+
+
+
+
+ Fix incorrect permissions-checking code for extended statistics
+ (Richard Guo)
+
+
+
+ If there are extended statistics on a table that the user has only
+ partial SELECT permissions on, some queries would
+ fail with unrecognized node type errors.
+
+
+
+
+
+
+ Fix extended statistics machinery to handle MCV-type statistics on
+ boolean-valued expressions (Tom Lane)
+
+
+
+ Statistics collection worked fine, but a query containing such an
+ expression in WHERE would fail
+ with unknown clause type.
+
+
+
+
+
+
+ Avoid planner core dump with constant
+ = ANY(array) clauses when
+ there are MCV-type extended statistics on
+ the array variable (Tom Lane)
+
+
+
+
+
+
+ Fix ALTER TABLE ... ENABLE/DISABLE TRIGGER to
+ handle recursion correctly for triggers on partitioned tables
+ (Álvaro Herrera, Amit Langote)
+
+
+
+ In certain cases, a trigger does not exist failure
+ would occur because the command would try to adjust the trigger on a
+ child partition that doesn't have it.
+
+
+
+
+
+
+ Allow cancellation of ANALYZE while it is
+ computing extended statistics (Tom Lane, Justin Pryzby)
+
+
+
+ In some scenarios with high statistics targets, it was possible to
+ spend many seconds in an un-cancellable sort operation.
+
+
+
+
+
+
+ Improve syntax error messages for type jsonpath
+ (Andrew Dunstan)
+
+
+
+
+
+
+ Ensure that pg_stop_backup() cleans up session
+ state properly (Fujii Masao)
+
+
+
+ This omission could lead to assertion failures or crashes later in
+ the session.
+
+
+
+
+
+
+ Fix trim_array() to handle a zero-dimensional
+ array argument sanely (Martin Kalcher)
+
+
+
+
+
+
+ Fix join alias matching in FOR [KEY] UPDATE/SHARE
+ clauses (Dean Rasheed)
+
+
+
+ In corner cases, a misleading error could be reported.
+
+
+
+
+
+
+ Reject ROW() expressions and functions
+ in FROM that have too many columns (Tom Lane)
+
+
+
+ Cases with more than about 1600 columns are unsupported, and
+ have always failed at execution. However, it emerges that some
+ earlier code could be driven to assertion failures or crashes by
+ queries with more than 32K columns. Add a parse-time check to
+ prevent that.
+
+
+
+
+
+
+ Fix dumping of a view using a function in FROM
+ that returns a composite type, when column(s) of the composite type
+ have been dropped since the view was made (Tom Lane)
+
+
+
+ This oversight could lead to dump/reload
+ or pg_upgrade failures, as the dumped
+ view would have too many column aliases for the function.
+
+
+
+
+
+
+ Disallow nested backup operations in logical replication walsenders
+ (Fujii Masao)
+
+
+
+
+
+
+ Fix memory leak in logical replication subscribers (Hou Zhijie)
+
+
+
+
+
+
+ Fix logical replication's checking of replica identity when the
+ target table is partitioned (Shi Yu, Hou Zhijie)
+
+
+
+ The replica identity columns have to be re-identified for the child
+ partition.
+
+
+
+
+
+
+ Fix failures to update cached schema data in a logical replication
+ subscriber after a schema change on the publisher (Shi Yu, Hou
+ Zhijie)
+
+
+
+
+
+
+ Fix WAL consistency checking logic to correctly
+ handle BRIN_EVACUATE_PAGE flags (Haiyang Wang)
+
+
+
+
+
+
+ Fix erroneous assertion checks in shared hashtable management
+ (Thomas Munro)
+
+
+
+
+
+
+ Avoid assertion failure
+ when min_dynamic_shared_memory is set to a
+ non-default value (Thomas Munro)
+
+
+
+
+
+
+ Arrange to clean up after commit-time errors
+ within SPI_commit(), rather than expecting
+ callers to do that (Peter Eisentraut, Tom Lane)
+
+
+
+ Proper cleanup is complicated and requires use of low-level
+ facilities, so it's not surprising that no known caller got it
+ right. This led to misbehaviors when a PL procedure
+ issued COMMIT but a failure occurred (such as a
+ deferred constraint check). To improve matters,
+ redefine SPI_commit() as starting a new
+ transaction, so that it becomes equivalent
+ to SPI_commit_and_chain() except that you get
+ default transaction characteristics instead of preserving the prior
+ transaction's characteristics. To make this somewhat transparent
+ API-wise, redefine SPI_start_transaction() as a
+ no-op. All known callers of SPI_commit()
+ immediately call SPI_start_transaction(), so
+ they will not notice any change. Similar remarks apply
+ to SPI_rollback().
+
+
+
+ Also fix PL/Python, which omitted any handling of such errors at all,
+ resulting in jumping out of the Python interpreter. This is
+ reported to crash Python 3.11. Older Python releases leak some
+ memory but seem okay with it otherwise.
+
+
+
+
+
+
+ Improve libpq's handling of idle states
+ in pipeline mode (Álvaro Herrera, Kyotaro Horiguchi)
+
+
+
+ This fixes message type 0x33 arrived from server while
+ idle warnings, as well as possible loss of end-of-query NULL
+ results from PQgetResult().
+
+
+
+
+
+
+ Avoid core dump in ecpglib with
+ unexpected orders of operations (Tom Lane)
+
+
+
+ Certain operations such as EXEC SQL PREPARE would
+ crash (rather than reporting an error as expected) if called before
+ establishing any database connection.
+
+
+
+
+
+
+ In ecpglib, avoid
+ redundant newlocale() calls (Noah Misch)
+
+
+
+ Allocate a C locale object once per process when first connecting,
+ rather than creating and freeing locale objects once per query.
+ This mitigates a libc memory leak on AIX, and may offer some
+ performance benefit everywhere.
+
+
+
+
+
+
+ In psql's \watch
+ command, echo a newline after cancellation with control-C
+ (Pavel Stehule)
+
+
+
+ This prevents libedit (and possibly also libreadline) from becoming
+ confused about which column the cursor is in.
+
+
+
+
+
+
+ Fix pg_upgrade to detect non-upgradable
+ usages of functions taking anyarray (Justin Pryzby)
+
+
+
+ Version 14 changed some built-in functions to take
+ type anycompatiblearray instead
+ of anyarray. While this is mostly transparent,
+ user-defined aggregates and operators built atop these functions
+ have to be declared with exactly matching types. The presence of an
+ object referencing the old signature will
+ cause pg_upgrade to fail, so change it to
+ detect and report such cases before beginning the upgrade.
+
+
+
+
+
+
+ Fix possible report of wrong error condition
+ after clone() failure
+ in pg_upgrade
+ with option (Justin Pryzby)
+
+
+
+
+
+
+ Fix contrib/pg_stat_statements to avoid
+ problems with very large query-text files on 32-bit platforms
+ (Tom Lane)
+
+
+
+
+
+
+ In contrib/postgres_fdw, prevent batch
+ insertion when there are WITH CHECK OPTION
+ constraints (Etsuro Fujita)
+
+
+
+ Such constraints cannot be checked properly if more than one row is
+ inserted at a time.
+
+
+
+
+
+
+ Fix contrib/postgres_fdw to detect failure to
+ send an asynchronous data fetch query (Fujii Masao)
+
+
+
+
+
+
+ Ensure that contrib/postgres_fdw sends
+ constants of regconfig and other reg*
+ types with proper schema qualification (Tom Lane)
+
+
+
+
+
+
+ Block signals while allocating dynamic shared memory on Linux
+ (Thomas Munro)
+
+
+
+ This avoids problems when a signal
+ interrupts posix_fallocate().
+
+
+
+
+
+
+ Detect unexpected EEXIST error
+ from shm_open() (Thomas Munro)
+
+
+
+ This avoids a possible crash on Solaris.
+
+
+
+
+
+
+ Avoid using signalfd()
+ on illumos systems (Thomas Munro)
+
+
+
+ This appears to trigger hangs and kernel panics, so avoid the
+ function until a fix is available.
+
+
+
+
+
+
+
+
Release 14.4
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 07f00a212b0..fd6f5e23173 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -515,7 +515,8 @@ static void ATExecSetRelOptions(Relation rel, List *defList,
Oid newAccessMethod,
LOCKMODE lockmode);
static void ATExecEnableDisableTrigger(Relation rel, const char *trigname,
- char fires_when, bool skip_system, LOCKMODE lockmode);
+ char fires_when, bool skip_system, bool recurse,
+ LOCKMODE lockmode);
static void ATExecEnableDisableRule(Relation rel, const char *rulename,
char fires_when, LOCKMODE lockmode);
static void ATPrepAddInherit(Relation child_rel);
@@ -4774,9 +4775,7 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
* be done in this phase. Generally, this phase acquires table locks,
* checks permissions and relkind, and recurses to find child tables.
*
- * ATRewriteCatalogs performs phase 2 for each affected table. (Note that
- * phases 2 and 3 normally do no explicit recursion, since phase 1 already
- * did it --- although some subcommands have to recurse in phase 2 instead.)
+ * ATRewriteCatalogs performs phase 2 for each affected table.
* Certain subcommands need to be performed before others to avoid
* unnecessary conflicts; for example, DROP COLUMN should come before
* ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
@@ -4784,6 +4783,12 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
*
* ATRewriteTables performs phase 3 for those tables that need it.
*
+ * For most subcommand types, phases 2 and 3 do no explicit recursion,
+ * since phase 1 already does it. However, for certain subcommand types
+ * it is only possible to determine how to recurse at phase 2 time; for
+ * those cases, phase 1 sets the cmd->recurse flag (or, in some older coding,
+ * changes the command subtype of a "Recurse" variant XXX to be cleaned up.)
+ *
* Thanks to the magic of MVCC, an error anywhere along the way rolls back
* the whole operation; we don't have to do anything special to clean up.
*
@@ -5384,10 +5389,10 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
errhint("Use ALTER TABLE ... DETACH PARTITION ... FINALIZE to complete the pending detach operation."));
/*
- * Copy the original subcommand for each table. This avoids conflicts
- * when different child tables need to make different parse
- * transformations (for example, the same column may have different column
- * numbers in different children).
+ * Copy the original subcommand for each table, so we can scribble on it.
+ * This avoids conflicts when different child tables need to make
+ * different parse transformations (for example, the same column may have
+ * different column numbers in different children).
*/
cmd = copyObject(cmd);
@@ -5866,6 +5871,9 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE | ATT_DIRECTORY_TABLE);
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode, context);
+ /* Set up recursion for phase 2; no other prep needed */
+ if (recurse)
+ cmd->recurse = true;
pass = AT_PASS_MISC;
break;
case AT_EnableRule: /* ENABLE/DISABLE RULE variants */
@@ -6280,35 +6288,51 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab,
break;
case AT_EnableTrig: /* ENABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
+ TRIGGER_FIRES_ON_ORIGIN, false,
+ cmd->recurse,
+ lockmode);
break;
case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_FIRES_ALWAYS, false, lockmode);
+ TRIGGER_FIRES_ALWAYS, false,
+ cmd->recurse,
+ lockmode);
break;
case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_FIRES_ON_REPLICA, false, lockmode);
+ TRIGGER_FIRES_ON_REPLICA, false,
+ cmd->recurse,
+ lockmode);
break;
case AT_DisableTrig: /* DISABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_DISABLED, false, lockmode);
+ TRIGGER_DISABLED, false,
+ cmd->recurse,
+ lockmode);
break;
case AT_EnableTrigAll: /* ENABLE TRIGGER ALL */
ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
+ TRIGGER_FIRES_ON_ORIGIN, false,
+ cmd->recurse,
+ lockmode);
break;
case AT_DisableTrigAll: /* DISABLE TRIGGER ALL */
ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_DISABLED, false, lockmode);
+ TRIGGER_DISABLED, false,
+ cmd->recurse,
+ lockmode);
break;
case AT_EnableTrigUser: /* ENABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_FIRES_ON_ORIGIN, true, lockmode);
+ TRIGGER_FIRES_ON_ORIGIN, true,
+ cmd->recurse,
+ lockmode);
break;
case AT_DisableTrigUser: /* DISABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_DISABLED, true, lockmode);
+ TRIGGER_DISABLED, true,
+ cmd->recurse,
+ lockmode);
break;
case AT_EnableRule: /* ENABLE RULE name */
@@ -16812,9 +16836,11 @@ index_copy_data(Relation rel, RelFileNode newrnode)
*/
static void
ATExecEnableDisableTrigger(Relation rel, const char *trigname,
- char fires_when, bool skip_system, LOCKMODE lockmode)
+ char fires_when, bool skip_system, bool recurse,
+ LOCKMODE lockmode)
{
- EnableDisableTrigger(rel, trigname, fires_when, skip_system, lockmode);
+ EnableDisableTriggerNew(rel, trigname, fires_when, skip_system, recurse,
+ lockmode);
/* MPP-6929: metadata tracking */
if (Gp_role == GP_ROLE_DISPATCH && MetaTrackValidKindNsp(rel->rd_rel))
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 57a258102c5..adc2da4b538 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -1624,14 +1624,16 @@ renametrig(RenameStmt *stmt)
* enablement/disablement, this also defines when the trigger
* should be fired in session replication roles.
* skip_system: if true, skip "system" triggers (constraint triggers)
+ * recurse: if true, recurse to partitions
*
* Caller should have checked permissions for the table; here we also
* enforce that superuser privilege is required to alter the state of
* system triggers
*/
void
-EnableDisableTrigger(Relation rel, const char *tgname,
- char fires_when, bool skip_system, LOCKMODE lockmode)
+EnableDisableTriggerNew(Relation rel, const char *tgname,
+ char fires_when, bool skip_system, bool recurse,
+ LOCKMODE lockmode)
{
Relation tgrel;
int nkeys;
@@ -1697,6 +1699,34 @@ EnableDisableTrigger(Relation rel, const char *tgname,
changed = true;
}
+ /*
+ * When altering FOR EACH ROW triggers on a partitioned table, do the
+ * same on the partitions as well, unless ONLY is specified.
+ *
+ * Note that we recurse even if we didn't change the trigger above,
+ * because the partitions' copy of the trigger may have a different
+ * value of tgenabled than the parent's trigger and thus might need to
+ * be changed.
+ */
+ if (recurse &&
+ rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
+ (TRIGGER_FOR_ROW(oldtrig->tgtype)))
+ {
+ PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
+ int i;
+
+ for (i = 0; i < partdesc->nparts; i++)
+ {
+ Relation part;
+
+ part = relation_open(partdesc->oids[i], lockmode);
+ EnableDisableTriggerNew(part, NameStr(oldtrig->tgname),
+ fires_when, skip_system, recurse,
+ lockmode);
+ table_close(part, NoLock); /* keep lock till commit */
+ }
+ }
+
InvokeObjectPostAlterHook(TriggerRelationId,
oldtrig->oid, 0);
}
@@ -1720,6 +1750,19 @@ EnableDisableTrigger(Relation rel, const char *tgname,
CacheInvalidateRelcache(rel);
}
+/*
+ * ABI-compatible wrapper for the above.  Note: this passes recurse = true,
+ * so it may recurse to partitions.  Do not call this function in new code.
+ */
+void
+EnableDisableTrigger(Relation rel, const char *tgname,
+ char fires_when, bool skip_system,
+ LOCKMODE lockmode)
+{
+ EnableDisableTriggerNew(rel, tgname, fires_when, skip_system,
+ true, lockmode);
+}
+
/*
* Build trigger data to attach to the given relcache entry.
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 3a9fd32741d..680147b899d 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -4066,6 +4066,7 @@ _copyAlterTableCmd(const AlterTableCmd *from)
COPY_SCALAR_FIELD(missing_ok);
COPY_NODE_FIELD(tags);
COPY_SCALAR_FIELD(unsettag);
+ COPY_SCALAR_FIELD(recurse);
return newnode;
}
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 9981c2fd023..92f785057b1 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -1222,6 +1222,7 @@ _equalAlterTableCmd(const AlterTableCmd *a, const AlterTableCmd *b)
COMPARE_SCALAR_FIELD(missing_ok);
COMPARE_NODE_FIELD(tags);
COMPARE_SCALAR_FIELD(unsettag);
+ COMPARE_SCALAR_FIELD(recurse);
return true;
}
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 8e2b9230cb3..6c45d349524 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -754,6 +754,12 @@ expression_returns_set_walker(Node *node, void *context)
/* else fall through to check args */
}
+ /*
+ * If you add any more cases that return sets, also fix
+ * expression_returns_set_rows() in clauses.c and IS_SRF_CALL() in
+ * tlist.c.
+ */
+
/* Avoid recursion for some cases that parser checks not to return a set */
if (IsA(node, Aggref))
return false;
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index 547b2550917..6a941c662c7 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -1005,7 +1005,7 @@ relation_can_be_sorted_early(PlannerInfo *root, RelOptInfo *rel,
* one are effectively checking properties of targetexpr, so there's
* no point in asking whether some other EC member would be better.)
*/
- if (IS_SRF_CALL((Node *) em->em_expr))
+ if (expression_returns_set((Node *) em->em_expr))
continue;
/*
@@ -1033,7 +1033,7 @@ relation_can_be_sorted_early(PlannerInfo *root, RelOptInfo *rel,
* member in this case; since SRFs can't appear in WHERE, they cannot
* belong to multi-member ECs.)
*/
- if (IS_SRF_CALL((Node *) em->em_expr))
+ if (expression_returns_set((Node *) em->em_expr))
return false;
return true;
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index 9d0f3274dbe..bb16a8d2c38 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -32,6 +32,17 @@ typedef struct maxSortGroupRef_context
static
bool maxSortGroupRef_walker(Node *node, maxSortGroupRef_context *cxt);
+/*
+ * Test if an expression node represents a SRF call. Beware multiple eval!
+ *
+ * Please note that this is only meant for use in split_pathtarget_at_srfs();
+ * if you use it anywhere else, your code is almost certainly wrong for SRFs
+ * nested within expressions. Use expression_returns_set() instead.
+ */
+#define IS_SRF_CALL(node) \
+ ((IsA(node, FuncExpr) && ((FuncExpr *) (node))->funcretset) || \
+ (IsA(node, OpExpr) && ((OpExpr *) (node))->opretset))
+
/*
* Data structures for split_pathtarget_at_srfs(). To preserve the identity
* of sortgroupref items even if they are textually equal(), what we track is
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index aff0b0db05b..dda1cc3d108 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -1333,8 +1333,8 @@ choose_best_statistics(List *stats, char requiredkind,
*
* (c) combinations using AND/OR/NOT
*
- * (d) ScalarArrayOpExprs of the form (Var/Expr op ANY (array)) or (Var/Expr
- * op ALL (array))
+ * (d) ScalarArrayOpExprs of the form (Var/Expr op ANY (Const)) or
+ * (Var/Expr op ALL (Const))
*
* In the future, the range of supported clauses may be expanded to more
* complex cases, for example (Var op Var).
@@ -1454,13 +1454,18 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause,
RangeTblEntry *rte = root->simple_rte_array[relid];
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
Node *clause_expr;
+ bool expronleft;
/* Only expressions with two arguments are considered compatible. */
if (list_length(expr->args) != 2)
return false;
/* Check if the expression has the right shape (one Var, one Const) */
- if (!examine_opclause_args(expr->args, &clause_expr, NULL, NULL))
+ if (!examine_opclause_args(expr->args, &clause_expr, NULL, &expronleft))
+ return false;
+
+ /* We only support Var on left, Const on right */
+ if (!expronleft)
return false;
/*
diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c
index ef118952c74..2ed87057dc5 100644
--- a/src/backend/statistics/mcv.c
+++ b/src/backend/statistics/mcv.c
@@ -1531,13 +1531,13 @@ pg_mcv_list_send(PG_FUNCTION_ARGS)
/*
* match the attribute/expression to a dimension of the statistic
*
- * Match the attribute/expression to statistics dimension. Optionally
- * determine the collation.
+ * Returns the zero-based index of the matching statistics dimension.
+ * Optionally determines the collation.
*/
static int
mcv_match_expression(Node *expr, Bitmapset *keys, List *exprs, Oid *collid)
{
- int idx = -1;
+ int idx;
if (IsA(expr, Var))
{
@@ -1549,20 +1549,19 @@ mcv_match_expression(Node *expr, Bitmapset *keys, List *exprs, Oid *collid)
idx = bms_member_index(keys, var->varattno);
- /* make sure the index is valid */
- Assert((idx >= 0) && (idx <= bms_num_members(keys)));
+ if (idx < 0)
+ elog(ERROR, "variable not found in statistics object");
}
else
{
+ /* expression - lookup in stats expressions */
ListCell *lc;
- /* expressions are stored after the simple columns */
- idx = bms_num_members(keys);
-
if (collid)
*collid = exprCollation(expr);
- /* expression - lookup in stats expressions */
+ /* expressions are stored after the simple columns */
+ idx = bms_num_members(keys);
foreach(lc, exprs)
{
Node *stat_expr = (Node *) lfirst(lc);
@@ -1573,13 +1572,10 @@ mcv_match_expression(Node *expr, Bitmapset *keys, List *exprs, Oid *collid)
idx++;
}
- /* make sure the index is valid */
- Assert((idx >= bms_num_members(keys)) &&
- (idx <= bms_num_members(keys) + list_length(exprs)));
+ if (lc == NULL)
+ elog(ERROR, "expression not found in statistics object");
}
- Assert((idx >= 0) && (idx < bms_num_members(keys) + list_length(exprs)));
-
return idx;
}
@@ -1659,8 +1655,6 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
/* match the attribute/expression to a dimension of the statistic */
idx = mcv_match_expression(clause_expr, keys, exprs, &collid);
- Assert((idx >= 0) && (idx < bms_num_members(keys) + list_length(exprs)));
-
/*
* Walk through the MCV items and evaluate the current clause. We
* can skip items that were already ruled out, and terminate if
@@ -1746,10 +1740,14 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
if (!examine_opclause_args(expr->args, &clause_expr, &cst, &expronleft))
elog(ERROR, "incompatible clause");
- /* ScalarArrayOpExpr has the Var always on the left */
- Assert(expronleft);
+ /* We expect Var on left */
+ if (!expronleft)
+ elog(ERROR, "incompatible clause");
- /* XXX what if (cst->constisnull == NULL)? */
+ /*
+ * Deconstruct the array constant, unless it's NULL (we'll cover
+ * that case below)
+ */
if (!cst->constisnull)
{
arrayval = DatumGetArrayTypeP(cst->constvalue);
@@ -1947,7 +1945,30 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
}
}
else
- elog(ERROR, "unknown clause type: %d", clause->type);
+ {
+ /* Otherwise, it must be a bare boolean-returning expression */
+ int idx;
+
+ /* match the expression to a dimension of the statistic */
+ idx = mcv_match_expression(clause, keys, exprs, NULL);
+
+ /*
+ * Walk through the MCV items and evaluate the current clause. We
+ * can skip items that were already ruled out, and terminate if
+ * there are no remaining MCV items that might possibly match.
+ */
+ for (i = 0; i < mcvlist->nitems; i++)
+ {
+ bool match;
+ MCVItem *item = &mcvlist->items[i];
+
+ /* "match" just means it's bool TRUE */
+ match = !item->isnull[idx] && DatumGetBool(item->values[idx]);
+
+ /* now, update the match bitmap, depending on OR/AND type */
+ matches[i] = RESULT_MERGE(matches[i], is_or, match);
+ }
+ }
}
return matches;
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 26cd7458961..6d00be72b16 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -6802,7 +6802,7 @@ trim_array(PG_FUNCTION_ARGS)
{
ArrayType *v = PG_GETARG_ARRAYTYPE_P(0);
int n = PG_GETARG_INT32(1);
- int array_length = ARR_DIMS(v)[0];
+ int array_length = (ARR_NDIM(v) > 0) ? ARR_DIMS(v)[0] : 0;
int16 elmlen;
bool elmbyval;
char elmalign;
@@ -6822,8 +6822,11 @@ trim_array(PG_FUNCTION_ARGS)
/* Set all the bounds as unprovided except the first upper bound */
memset(lowerProvided, false, sizeof(lowerProvided));
memset(upperProvided, false, sizeof(upperProvided));
- upper[0] = ARR_LBOUND(v)[0] + array_length - n - 1;
- upperProvided[0] = true;
+ if (ARR_NDIM(v) > 0)
+ {
+ upper[0] = ARR_LBOUND(v)[0] + array_length - n - 1;
+ upperProvided[0] = true;
+ }
/* Fetch the needed information about the element type */
get_typlenbyvalalign(ARR_ELEMTYPE(v), &elmlen, &elmbyval, &elmalign);
diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h
index 995ba509cc8..f23aeb0741f 100644
--- a/src/include/commands/trigger.h
+++ b/src/include/commands/trigger.h
@@ -168,6 +168,9 @@ extern Oid get_trigger_oid(Oid relid, const char *name, bool missing_ok);
extern ObjectAddress renametrig(RenameStmt *stmt);
+extern void EnableDisableTriggerNew(Relation rel, const char *tgname,
+ char fires_when, bool skip_system, bool recurse,
+ LOCKMODE lockmode);
extern void EnableDisableTrigger(Relation rel, const char *tgname,
char fires_when, bool skip_system, LOCKMODE lockmode);
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 449fb3fba03..93a0e796dcc 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -2181,6 +2181,7 @@ typedef struct AlterTableCmd /* one subcommand of an ALTER TABLE */
Node *transform; /* transformation expr for ALTER TYPE */
DropBehavior behavior; /* RESTRICT or CASCADE for DROP cases */
bool missing_ok; /* skip error if missing? */
+ bool recurse; /* exec-time recursion */
/*
* Extra information dispatched from QD to QEs in AT_SetDistributedBy and
diff --git a/src/include/optimizer/optimizer.h b/src/include/optimizer/optimizer.h
index f8400206288..c2553891186 100644
--- a/src/include/optimizer/optimizer.h
+++ b/src/include/optimizer/optimizer.h
@@ -25,11 +25,6 @@
#include "nodes/parsenodes.h"
#include "nodes/plannodes.h"
-/* Test if an expression node represents a SRF call. Beware multiple eval! */
-#define IS_SRF_CALL(node) \
- ((IsA(node, FuncExpr) && ((FuncExpr *) (node))->funcretset) || \
- (IsA(node, OpExpr) && ((OpExpr *) (node))->opretset))
-
/*
* We don't want to include nodes/pathnodes.h here, because non-planner
* code should generally treat PlannerInfo as an opaque typedef.
diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out
index a8686405408..d6a8f8f8545 100644
--- a/src/test/regress/expected/arrays.out
+++ b/src/test/regress/expected/arrays.out
@@ -2586,3 +2586,5 @@ SELECT trim_array(ARRAY[1, 2, 3], -1); -- fail
ERROR: number of elements to trim must be between 0 and 3
SELECT trim_array(ARRAY[1, 2, 3], 10); -- fail
ERROR: number of elements to trim must be between 0 and 3
+SELECT trim_array(ARRAY[]::int[], 1); -- fail
+ERROR: number of elements to trim must be between 0 and 0
diff --git a/src/test/regress/expected/incremental_sort.out b/src/test/regress/expected/incremental_sort.out
index c5f52e1e97f..8a25141d8f5 100644
--- a/src/test/regress/expected/incremental_sort.out
+++ b/src/test/regress/expected/incremental_sort.out
@@ -1804,15 +1804,3 @@ order by 1, 2;
-> Function Scan on generate_series
(7 rows)
--- Disallow pushing down sort when pathkey is an SRF.
-explain (costs off) select unique1 from tenk1 order by unnest('{1,2}'::int[]);
- QUERY PLAN
--------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- Merge Key: (unnest('{1,2}'::anyarray))
- -> Sort
- Sort Key: (unnest('{1,2}'::anyarray))
- -> ProjectSet
- -> Index Only Scan using tenk1_unique1 on tenk1
-(6 rows)
-
diff --git a/src/test/regress/expected/incremental_sort_optimizer.out b/src/test/regress/expected/incremental_sort_optimizer.out
index f5fd24f81ba..0648bb03799 100644
--- a/src/test/regress/expected/incremental_sort_optimizer.out
+++ b/src/test/regress/expected/incremental_sort_optimizer.out
@@ -1658,17 +1658,3 @@ order by 1, 2;
Optimizer: Postgres query optimizer
(8 rows)
--- Disallow pushing down sort when pathkey is an SRF.
-explain (costs off) select unique1 from tenk1 order by unnest('{1,2}'::int[]);
- QUERY PLAN
------------------------------------------------------
- Result
- -> Gather Motion 3:1 (slice1; segments: 3)
- Merge Key: (unnest('{1,2}'::anyarray))
- -> Sort
- Sort Key: (unnest('{1,2}'::anyarray))
- -> ProjectSet
- -> Seq Scan on tenk1
- Optimizer: Pivotal Optimizer (GPORCA)
-(8 rows)
-
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 4de01f4f632..62c4b4f99c2 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -1217,6 +1217,30 @@ SELECT generate_series(1, two), array(select generate_series(1, two))
Optimizer: Postgres query optimizer
(18 rows)
+-- must disallow pushing sort below gather when pathkey contains an SRF
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT unnest(ARRAY[]::integer[]) + 1 AS pathkey
+ FROM tenk1 t1 JOIN tenk1 t2 ON TRUE
+ ORDER BY pathkey;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Sort
+ Output: (((unnest('{}'::integer[])) + 1))
+ Sort Key: (((unnest('{}'::integer[])) + 1))
+ -> Result
+ Output: ((unnest('{}'::integer[])) + 1)
+ -> ProjectSet
+ Output: unnest('{}'::integer[])
+ -> Nested Loop
+ -> Gather
+ Workers Planned: 4
+ -> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t1
+ -> Materialize
+ -> Gather
+ Workers Planned: 4
+ -> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t2
+(15 rows)
+
-- test passing expanded-value representations to workers
CREATE FUNCTION make_some_array(int,int) returns int[] as
$$declare x int[];
diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out
index 06aff4d5bd0..ba2914d3c7a 100644
--- a/src/test/regress/expected/stats_ext.out
+++ b/src/test/regress/expected/stats_ext.out
@@ -270,14 +270,23 @@ SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_3';
CREATE STATISTICS ab1_exprstat_4 ON date_trunc('day', d) FROM ab1;
-- date_trunc on timestamp is immutable
CREATE STATISTICS ab1_exprstat_5 ON date_trunc('day', c) FROM ab1;
+-- check use of a boolean-returning expression
+CREATE STATISTICS ab1_exprstat_6 ON
+ (case a when 1 then true else false end), b FROM ab1;
-- insert some data and run analyze, to test that these cases build properly
INSERT INTO ab1
-SELECT
- generate_series(1,10),
- generate_series(1,10),
- generate_series('2020-10-01'::timestamp, '2020-10-10'::timestamp, interval '1 day'),
- generate_series('2020-10-01'::timestamptz, '2020-10-10'::timestamptz, interval '1 day');
+SELECT x / 10, x / 3,
+ '2020-10-01'::timestamp + x * interval '1 day',
+ '2020-10-01'::timestamptz + x * interval '1 day'
+FROM generate_series(1, 100) x;
ANALYZE ab1;
+-- apply some stats
+SELECT * FROM check_estimated_rows('SELECT * FROM ab1 WHERE (case a when 1 then true else false end) AND b=2');
+ estimated | actual
+-----------+--------
+ 1 | 0
+(1 row)
+
DROP TABLE ab1;
-- Verify supported object types for extended statistics
CREATE schema tststats;
@@ -1835,7 +1844,8 @@ CREATE TABLE mcv_lists (
b VARCHAR,
filler3 DATE,
c INT,
- d TEXT
+ d TEXT,
+ ia INT[]
)
WITH (autovacuum_enabled = off);
-- random data (no MCV list)
@@ -1905,8 +1915,9 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 A
-- 100 distinct combinations, all in the MCV list
TRUNCATE mcv_lists;
DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, filler1)
- SELECT mod(i,100), mod(i,50), mod(i,25), i FROM generate_series(1,5000) s(i);
+INSERT INTO mcv_lists (a, b, c, ia, filler1)
+ SELECT mod(i,100), mod(i,50), mod(i,25), array[mod(i,25)], i
+ FROM generate_series(1,5000) s(i);
ANALYZE mcv_lists;
SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
estimated | actual
@@ -2046,8 +2057,14 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY
1 | 100
(1 row)
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
+ estimated | actual
+-----------+--------
+ 4 | 50
+(1 row)
+
-- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists;
+CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c, ia FROM mcv_lists;
ANALYZE mcv_lists;
SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
estimated | actual
@@ -2193,6 +2210,12 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY
100 | 100
(1 row)
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
+ estimated | actual
+-----------+--------
+ 4 | 50
+(1 row)
+
-- check change of unrelated column type does not reset the MCV statistics
ALTER TABLE mcv_lists ALTER COLUMN d TYPE VARCHAR(64);
SELECT d.stxdmcv IS NOT NULL
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index c86a5d265c4..a7757ed2be3 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -2530,24 +2530,42 @@ create table parent (a int) partition by list (a);
create table child1 partition of parent for values in (1);
create trigger tg after insert on parent
for each row execute procedure trig_nothing();
+create trigger tg_stmt after insert on parent
+ for statement execute procedure trig_nothing();
select tgrelid::regclass, tgname, tgenabled from pg_trigger
where tgrelid in ('parent'::regclass, 'child1'::regclass)
- order by tgrelid::regclass::text;
- tgrelid | tgname | tgenabled
----------+--------+-----------
- child1 | tg | O
- parent | tg | O
-(2 rows)
+ order by tgrelid::regclass::text, tgname;
+ tgrelid | tgname | tgenabled
+---------+---------+-----------
+ child1 | tg | O
+ parent | tg | O
+ parent | tg_stmt | O
+(3 rows)
-alter table only parent enable always trigger tg;
+alter table only parent enable always trigger tg; -- no recursion because ONLY
+alter table parent enable always trigger tg_stmt; -- no recursion because statement trigger
select tgrelid::regclass, tgname, tgenabled from pg_trigger
where tgrelid in ('parent'::regclass, 'child1'::regclass)
- order by tgrelid::regclass::text;
- tgrelid | tgname | tgenabled
----------+--------+-----------
- child1 | tg | O
- parent | tg | A
-(2 rows)
+ order by tgrelid::regclass::text, tgname;
+ tgrelid | tgname | tgenabled
+---------+---------+-----------
+ child1 | tg | O
+ parent | tg | A
+ parent | tg_stmt | A
+(3 rows)
+
+-- The following is a no-op for the parent trigger but not so
+-- for the child trigger, so recursion should be applied.
+alter table parent enable always trigger tg;
+select tgrelid::regclass, tgname, tgenabled from pg_trigger
+ where tgrelid in ('parent'::regclass, 'child1'::regclass)
+ order by tgrelid::regclass::text, tgname;
+ tgrelid | tgname | tgenabled
+---------+---------+-----------
+ child1 | tg | A
+ parent | tg | A
+ parent | tg_stmt | A
+(3 rows)
drop table parent, child1;
-- Verify that firing state propagates correctly on creation, too
diff --git a/src/test/regress/sql/arrays.sql b/src/test/regress/sql/arrays.sql
index d10278e32f0..cefe5b05b3b 100644
--- a/src/test/regress/sql/arrays.sql
+++ b/src/test/regress/sql/arrays.sql
@@ -875,3 +875,4 @@ FROM
SELECT trim_array(ARRAY[1, 2, 3], -1); -- fail
SELECT trim_array(ARRAY[1, 2, 3], 10); -- fail
+SELECT trim_array(ARRAY[]::int[], 1); -- fail
diff --git a/src/test/regress/sql/incremental_sort.sql b/src/test/regress/sql/incremental_sort.sql
index afd1dab2045..648eced7e14 100644
--- a/src/test/regress/sql/incremental_sort.sql
+++ b/src/test/regress/sql/incremental_sort.sql
@@ -294,5 +294,3 @@ from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub;
explain (costs off) select sub.unique1, stringu1 || random()::text
from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub
order by 1, 2;
--- Disallow pushing down sort when pathkey is an SRF.
-explain (costs off) select unique1 from tenk1 order by unnest('{1,2}'::int[]);
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index 846066fad05..4a22a001ef9 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -443,6 +443,12 @@ EXPLAIN (VERBOSE, COSTS OFF)
SELECT generate_series(1, two), array(select generate_series(1, two))
FROM tenk1 ORDER BY tenthous;
+-- must disallow pushing sort below gather when pathkey contains an SRF
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT unnest(ARRAY[]::integer[]) + 1 AS pathkey
+ FROM tenk1 t1 JOIN tenk1 t2 ON TRUE
+ ORDER BY pathkey;
+
-- test passing expanded-value representations to workers
CREATE FUNCTION make_some_array(int,int) returns int[] as
$$declare x int[];
diff --git a/src/test/regress/sql/stats_ext.sql b/src/test/regress/sql/stats_ext.sql
index 744bb00c161..c2a91675045 100644
--- a/src/test/regress/sql/stats_ext.sql
+++ b/src/test/regress/sql/stats_ext.sql
@@ -176,14 +176,21 @@ CREATE STATISTICS ab1_exprstat_4 ON date_trunc('day', d) FROM ab1;
-- date_trunc on timestamp is immutable
CREATE STATISTICS ab1_exprstat_5 ON date_trunc('day', c) FROM ab1;
+-- check use of a boolean-returning expression
+CREATE STATISTICS ab1_exprstat_6 ON
+ (case a when 1 then true else false end), b FROM ab1;
+
-- insert some data and run analyze, to test that these cases build properly
INSERT INTO ab1
-SELECT
- generate_series(1,10),
- generate_series(1,10),
- generate_series('2020-10-01'::timestamp, '2020-10-10'::timestamp, interval '1 day'),
- generate_series('2020-10-01'::timestamptz, '2020-10-10'::timestamptz, interval '1 day');
+SELECT x / 10, x / 3,
+ '2020-10-01'::timestamp + x * interval '1 day',
+ '2020-10-01'::timestamptz + x * interval '1 day'
+FROM generate_series(1, 100) x;
ANALYZE ab1;
+
+-- apply some stats
+SELECT * FROM check_estimated_rows('SELECT * FROM ab1 WHERE (case a when 1 then true else false end) AND b=2');
+
DROP TABLE ab1;
-- Verify supported object types for extended statistics
@@ -923,7 +930,8 @@ CREATE TABLE mcv_lists (
b VARCHAR,
filler3 DATE,
c INT,
- d TEXT
+ d TEXT,
+ ia INT[]
)
WITH (autovacuum_enabled = off);
@@ -972,8 +980,9 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 A
TRUNCATE mcv_lists;
DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, filler1)
- SELECT mod(i,100), mod(i,50), mod(i,25), i FROM generate_series(1,5000) s(i);
+INSERT INTO mcv_lists (a, b, c, ia, filler1)
+ SELECT mod(i,100), mod(i,50), mod(i,25), array[mod(i,25)], i
+ FROM generate_series(1,5000) s(i);
ANALYZE mcv_lists;
@@ -1023,8 +1032,10 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY
SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])');
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
+
-- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists;
+CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c, ia FROM mcv_lists;
ANALYZE mcv_lists;
@@ -1076,6 +1087,8 @@ SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY
SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])');
+SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
+
-- check change of unrelated column type does not reset the MCV statistics
ALTER TABLE mcv_lists ALTER COLUMN d TYPE VARCHAR(64);
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index d93ba34eeaf..e6fe7211756 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -1845,13 +1845,22 @@ create table parent (a int) partition by list (a);
create table child1 partition of parent for values in (1);
create trigger tg after insert on parent
for each row execute procedure trig_nothing();
+create trigger tg_stmt after insert on parent
+ for statement execute procedure trig_nothing();
select tgrelid::regclass, tgname, tgenabled from pg_trigger
where tgrelid in ('parent'::regclass, 'child1'::regclass)
- order by tgrelid::regclass::text;
-alter table only parent enable always trigger tg;
+ order by tgrelid::regclass::text, tgname;
+alter table only parent enable always trigger tg; -- no recursion because ONLY
+alter table parent enable always trigger tg_stmt; -- no recursion because statement trigger
select tgrelid::regclass, tgname, tgenabled from pg_trigger
where tgrelid in ('parent'::regclass, 'child1'::regclass)
- order by tgrelid::regclass::text;
+ order by tgrelid::regclass::text, tgname;
+-- The following is a no-op for the parent trigger but not so
+-- for the child trigger, so recursion should be applied.
+alter table parent enable always trigger tg;
+select tgrelid::regclass, tgname, tgenabled from pg_trigger
+ where tgrelid in ('parent'::regclass, 'child1'::regclass)
+ order by tgrelid::regclass::text, tgname;
drop table parent, child1;
-- Verify that firing state propagates correctly on creation, too